From d958d172dce2a003c56d3d358cb58da2cf0d0c5b Mon Sep 17 00:00:00 2001 From: Julius Hinze Date: Tue, 24 Jun 2025 18:20:38 +0200 Subject: [PATCH 01/10] chore: remove global name validation scheme --- .../scripts/run-integration-tests-group.sh | 4 +- .../workflows/scripts/run-unit-tests-group.sh | 2 +- .golangci.yml | 1 + Makefile | 70 +++--- .../mimir-ingest-storage/compose-up.sh | 2 +- .../mimir-microservices-mode/compose-up.sh | 4 +- .../mimir-monolithic-mode/compose-up.sh | 2 +- go.mod | 16 +- go.sum | 52 +++-- integration/kv_test.go | 4 +- pkg/cardinality/request.go | 2 +- pkg/distributor/distributor.go | 15 +- pkg/distributor/validate.go | 7 +- pkg/distributor/validate_test.go | 68 +++++- pkg/frontend/querymiddleware/error_caching.go | 3 +- .../querymiddleware/request_validation.go | 7 - .../request_validation_test.go | 4 +- pkg/mimir/modules.go | 2 + pkg/mimir/promexts.go | 5 - pkg/mimirtool/rules/rules.go | 4 +- .../cardinality_analysis_handler_test.go | 4 +- pkg/querier/querier.go | 1 + pkg/querier/stats_renderer_test.go | 7 - pkg/ruler/api.go | 2 +- pkg/ruler/compat.go | 2 + pkg/ruler/manager.go | 5 +- pkg/ruler/ruler.go | 2 +- .../compat/name_validating_engine.go | 88 +++++++ pkg/streamingpromql/engine_test.go | 13 +- .../aggregations/aggregation_test.go | 12 +- .../operators/aggregations/count_values.go | 6 +- .../aggregations/count_values_test.go | 23 +- .../operators/functions/factories.go | 50 ++-- .../operators/functions/label.go | 10 +- pkg/streamingpromql/planning.go | 14 +- .../planning/core/aggregate_expression.go | 11 +- .../planning/core/function_call.go | 2 +- pkg/streamingpromql/planning/plan.go | 2 + pkg/streamingpromql/query.go | 5 +- pkg/util/validation/limits.go | 6 + tools/benchmark-query-engine/main.go | 2 +- .../alertmanager/config/notifiers.go | 2 +- .../alertmanager/matchers/compat/parse.go | 2 +- .../alertmanager/notify/webhook/webhook.go | 3 +- .../prometheus/common/expfmt/decode.go | 20 +- 
.../expfmt/decode_globalvalidationscheme.go | 48 ++++ .../expfmt/decode_localvalidationscheme.go | 52 +++++ .../prometheus/common/model/alert.go | 6 +- .../model/alert_globalvalidationscheme.go | 21 ++ .../model/alert_localvalidationscheme.go | 21 ++ .../prometheus/common/model/labels.go | 41 +--- .../model/labels_globalvalidationscheme.go | 53 +++++ .../model/labels_localvalidationscheme.go | 55 +++++ .../prometheus/common/model/labelset.go | 27 +-- .../model/labelset_globalvalidationscheme.go | 45 ++++ .../model/labelset_localvalidationscheme.go | 46 ++++ .../prometheus/common/model/metric.go | 48 +--- .../model/metric_globalvalidationscheme.go | 43 ++++ .../model/metric_localvalidationscheme.go | 22 ++ .../prometheus/common/model/silence.go | 9 +- .../model/silence_globalvalidationscheme.go | 26 +++ .../model/silence_localvalidationscheme.go | 26 +++ .../contrib/bridges/prometheus/producer.go | 10 +- .../otel/exporters/prometheus/config.go | 21 +- .../otel/exporters/prometheus/exporter.go | 219 ++++++++++-------- .../exporter_globalvalidationscheme.go | 15 ++ .../exporter_localvalidationscheme.go | 15 ++ .../otel/sdk/metric/periodic_reader.go | 6 +- .../otel/sdk/metric/pipeline.go | 21 +- .../otel/sdk/metric/version.go | 2 +- vendor/modules.txt | 17 +- 71 files changed, 1079 insertions(+), 404 deletions(-) create mode 100644 pkg/streamingpromql/compat/name_validating_engine.go create mode 100644 vendor/github.com/prometheus/common/expfmt/decode_globalvalidationscheme.go create mode 100644 vendor/github.com/prometheus/common/expfmt/decode_localvalidationscheme.go create mode 100644 vendor/github.com/prometheus/common/model/alert_globalvalidationscheme.go create mode 100644 vendor/github.com/prometheus/common/model/alert_localvalidationscheme.go create mode 100644 vendor/github.com/prometheus/common/model/labels_globalvalidationscheme.go create mode 100644 vendor/github.com/prometheus/common/model/labels_localvalidationscheme.go create mode 100644 
vendor/github.com/prometheus/common/model/labelset_globalvalidationscheme.go create mode 100644 vendor/github.com/prometheus/common/model/labelset_localvalidationscheme.go create mode 100644 vendor/github.com/prometheus/common/model/metric_globalvalidationscheme.go create mode 100644 vendor/github.com/prometheus/common/model/metric_localvalidationscheme.go create mode 100644 vendor/github.com/prometheus/common/model/silence_globalvalidationscheme.go create mode 100644 vendor/github.com/prometheus/common/model/silence_localvalidationscheme.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter_globalvalidationscheme.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter_localvalidationscheme.go diff --git a/.github/workflows/scripts/run-integration-tests-group.sh b/.github/workflows/scripts/run-integration-tests-group.sh index cdd0759687c..4d43668216b 100755 --- a/.github/workflows/scripts/run-integration-tests-group.sh +++ b/.github/workflows/scripts/run-integration-tests-group.sh @@ -37,7 +37,7 @@ if [[ -z "$TOTAL" ]]; then fi # List all tests. -ALL_TESTS=$(go test -tags=requires_docker,stringlabels -list 'Test.*' "${INTEGRATION_DIR}/..." | grep -E '^Test.*' | sort) +ALL_TESTS=$(go test -tags=requires_docker,stringlabels,localvalidationscheme -list 'Test.*' "${INTEGRATION_DIR}/..." | grep -E '^Test.*' | sort) # Filter tests by the requested group. GROUP_TESTS=$(echo "$ALL_TESTS" | awk -v TOTAL="$TOTAL" -v INDEX="$INDEX" 'NR % TOTAL == INDEX') @@ -58,4 +58,4 @@ REGEX="${REGEX})$" # that integration tests will fail on data races. export MIMIR_ENV_VARS_JSON='{"GORACE": "halt_on_error=1"}' -exec go test -tags=requires_docker,stringlabels -timeout 2400s -v -count=1 -run "${REGEX}" "${INTEGRATION_DIR}/..." +exec go test -tags=requires_docker,stringlabels,localvalidationscheme -timeout 2400s -v -count=1 -run "${REGEX}" "${INTEGRATION_DIR}/..." 
diff --git a/.github/workflows/scripts/run-unit-tests-group.sh b/.github/workflows/scripts/run-unit-tests-group.sh index 5959f3ddd37..b82bd1b5668 100755 --- a/.github/workflows/scripts/run-unit-tests-group.sh +++ b/.github/workflows/scripts/run-unit-tests-group.sh @@ -47,4 +47,4 @@ echo "$GROUP_TESTS" echo "" # shellcheck disable=SC2086 # we *want* word splitting of GROUP_TESTS. -exec go test -tags=netgo,stringlabels -timeout 30m -race ${GROUP_TESTS} +exec go test -tags=netgo,stringlabels,localvalidationscheme -timeout 30m -race ${GROUP_TESTS} diff --git a/.golangci.yml b/.golangci.yml index 2a37d5be725..cbeee530971 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -10,6 +10,7 @@ run: - stringlabels - requires_docker - requires_libpcap + - localvalidationscheme output: formats: text: diff --git a/Makefile b/Makefile index ae92cd78a43..210073f773f 100644 --- a/Makefile +++ b/Makefile @@ -261,7 +261,7 @@ GO_FLAGS := -ldflags "\ -X $(MIMIR_VERSION).Branch=$(GIT_BRANCH) \ -X $(MIMIR_VERSION).Revision=$(GIT_REVISION) \ -X $(MIMIR_VERSION).Version=$(VERSION) \ - -extldflags \"-static\" -s -w" -tags netgo,stringlabels + -extldflags \"-static\" -s -w" -tags netgo,stringlabels,localvalidationscheme ifeq ($(BUILD_IN_CONTAINER),true) @@ -318,7 +318,7 @@ lint: check-makefiles golangci-lint run # Ensure no blocklisted package is imported. - GOFLAGS="-tags=requires_docker,stringlabels" faillint -paths "github.com/bmizerany/assert=github.com/stretchr/testify/assert,\ + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/bmizerany/assert=github.com/stretchr/testify/assert,\ golang.org/x/net/context=context,\ sync/atomic=go.uber.org/atomic,\ regexp=github.com/grafana/regexp,\ @@ -328,31 +328,31 @@ lint: check-makefiles github.com/weaveworks/common/user.{ExtractOrgIDFromHTTPRequest}=github.com/grafana/mimir/pkg/tenant.{ExtractTenantIDFromHTTPRequest}" ./pkg/... ./cmd/... ./tools/... ./integration/... # Ensure clean pkg structure. 
- faillint -paths "\ + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "\ github.com/grafana/mimir/pkg/scheduler,\ github.com/grafana/mimir/pkg/frontend,\ github.com/grafana/mimir/pkg/frontend/transport,\ github.com/grafana/mimir/pkg/frontend/v1,\ github.com/grafana/mimir/pkg/frontend/v2" \ ./pkg/querier/... - faillint -paths "github.com/grafana/mimir/pkg/querier/..." ./pkg/scheduler/... - faillint -paths "github.com/grafana/mimir/pkg/storage/tsdb/..." ./pkg/storage/bucket/... - faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/alertmanager/alertspb/... - faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/ruler/rulespb/... - faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/storage/sharding/... - faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/querier/engine/... - faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/querier/api/... - faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/util/math/... - - # Ensure all errors are report as APIError - faillint -paths "github.com/weaveworks/common/httpgrpc.{Errorf}=github.com/grafana/mimir/pkg/api/error.Newf" ./pkg/frontend/querymiddleware/... + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/grafana/mimir/pkg/querier/..." ./pkg/scheduler/... + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/grafana/mimir/pkg/storage/tsdb/..." ./pkg/storage/bucket/... + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/alertmanager/alertspb/... + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/ruler/rulespb/... + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/storage/sharding/... 
+ GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/querier/engine/... + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/querier/api/... + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/util/math/... + + # Ensure all errors are reported as APIError + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/weaveworks/common/httpgrpc.{Errorf}=github.com/grafana/mimir/pkg/api/error.Newf" ./pkg/frontend/querymiddleware/... # errors.Cause() only work on errors wrapped by github.com/pkg/errors, while it doesn't work # on errors wrapped by golang standard errors package. In Mimir we currently use github.com/pkg/errors # but other vendors we depend on (e.g. Prometheus) just uses the standard errors package. # For this reason, we recommend to not use errors.Cause() anywhere, so that we don't have to # question whether the usage is safe or not. - faillint -paths "github.com/pkg/errors.{Cause}" ./pkg/... ./cmd/... ./tools/... ./integration/... + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/pkg/errors.{Cause}" ./pkg/... ./cmd/... ./tools/... ./integration/... # gogo/status allows to easily customize error details while grpc/status doesn't: # for this reason we use gogo/status in several places. However, gogo/status.FromError() @@ -361,13 +361,13 @@ lint: check-makefiles # Since we want support for errors wrapping everywhere, to avoid subtle bugs depending # on which status package is imported, we don't allow .FromError() from both packages # and we require to use grpcutil.ErrorToStatus() instead. 
- faillint -paths "\ + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "\ google.golang.org/grpc/status.{FromError}=github.com/grafana/dskit/grpcutil.ErrorToStatus,\ github.com/gogo/status.{FromError}=github.com/grafana/dskit/grpcutil.ErrorToStatus" \ ./pkg/... ./cmd/... ./tools/... ./integration/... # Ensure the query path is supporting multiple tenants - faillint -paths "\ + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "\ github.com/grafana/mimir/pkg/tenant.{TenantID}=github.com/grafana/mimir/pkg/tenant.{TenantIDs}" \ ./pkg/scheduler/... \ ./pkg/frontend/... \ @@ -375,7 +375,7 @@ lint: check-makefiles ./pkg/frontend/querymiddleware/... # Ensure packages that no longer use a global logger don't reintroduce it - faillint -paths "github.com/grafana/mimir/pkg/util/log.{Logger}" \ + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/grafana/mimir/pkg/util/log.{Logger}" \ ./pkg/alertmanager/... \ ./pkg/compactor/... \ ./pkg/distributor/... \ @@ -390,22 +390,22 @@ lint: check-makefiles # We've copied github.com/NYTimes/gziphandler to pkg/util/gziphandler # at least until https://github.com/nytimes/gziphandler/pull/112 is merged - faillint -paths "github.com/NYTimes/gziphandler" \ + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/NYTimes/gziphandler" \ ./pkg/... ./cmd/... ./tools/... ./integration/... # We don't want to use yaml.v2 anywhere, because we use yaml.v3 now, # and UnamrshalYAML signature is not compatible between them. - faillint -paths "gopkg.in/yaml.v2" \ + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "gopkg.in/yaml.v2" \ ./pkg/... ./cmd/... ./tools/... ./integration/... # Ensure packages we imported from Thanos are no longer used. 
- GOFLAGS="-tags=requires_docker,stringlabels" faillint -paths \ + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths \ "github.com/thanos-io/thanos/pkg/..." \ ./pkg/... ./cmd/... ./tools/... ./integration/... # Ensure we never use the default registerer and we allow to use a custom one (improves testability). # Also, ensure we use promauto.With() to reduce the chances we forget to register metrics. - faillint -paths \ + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths \ "github.com/prometheus/client_golang/prometheus/promauto.{NewCounter,NewCounterVec,NewCounterFunc,NewGauge,NewGaugeVec,NewGaugeFunc,NewSummary,NewSummaryVec,NewHistogram,NewHistogramVec}=github.com/prometheus/client_golang/prometheus/promauto.With,\ github.com/prometheus/client_golang/prometheus.{MustRegister,Register,DefaultRegisterer}=github.com/prometheus/client_golang/prometheus/promauto.With,\ github.com/prometheus/client_golang/prometheus.{NewCounter,NewCounterVec,NewCounterFunc,NewGauge,NewGaugeVec,NewGaugeFunc,NewSummary,NewSummaryVec,NewHistogram,NewHistogramVec}=github.com/prometheus/client_golang/prometheus/promauto.With" \ @@ -414,7 +414,7 @@ lint: check-makefiles # Use the faster slices.Sort where we can. # Note that we don't automatically suggest replacing sort.Float64s() with slices.Sort() as the documentation for slices.Sort() # at the time of writing warns that slices.Sort() may not correctly handle NaN values. - faillint -paths \ + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths \ "sort.{Strings,Ints}=slices.Sort" \ ./pkg/... ./cmd/... ./tools/... ./integration/... @@ -425,44 +425,44 @@ lint: check-makefiles # Don't use generic ring.Read operation. # ring.Read usually isn't the right choice, and we prefer that each component define its operations explicitly.
- faillint -paths \ + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths \ "github.com/grafana/dskit/ring.{Read}" \ ./pkg/... ./cmd/... ./tools/... ./integration/... # Do not directly call flag.Parse() and argument getters, to try to reduce risk of misuse. - faillint -paths \ + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths \ "flag.{Parse,NArg,Arg,Args}=github.com/grafana/dskit/flagext.{ParseFlagsAndArguments,ParseFlagsWithoutArguments}" \ ./pkg/... ./cmd/... ./tools/... ./integration/... # Ensure we use our custom gRPC clients. - faillint -paths \ + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths \ "github.com/grafana/mimir/pkg/storegateway/storegatewaypb.{NewStoreGatewayClient}=github.com/grafana/mimir/pkg/storegateway/storegatewaypb.NewCustomStoreGatewayClient" \ ./pkg/... ./cmd/... ./tools/... ./integration/... # Prefer using WithCancelCause in production code, so that cancelled contexts have more information available from context.Cause(ctx). - faillint -ignore-tests -paths \ + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -ignore-tests -paths \ "context.{WithCancel}=context.WithCancelCause" \ ./pkg/... ./cmd/... ./tools/... ./integration/... # Do not use the object storage client intended only for tools within Mimir itself - faillint -paths \ + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths \ "github.com/grafana/mimir/pkg/util/objtools" \ ./pkg/... ./cmd/... ./integration/... # Use the more performant metadata.ValueFromIncomingContext wherever possible (if not possible, we can always put # a lint ignore directive to skip linting). - faillint -paths \ + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths \ "google.golang.org/grpc/metadata.{FromIncomingContext}=google.golang.org/grpc/metadata.ValueFromIncomingContext" \ ./pkg/... ./cmd/... ./integration/... 
# We don't use topic auto-creation because we don't control the num.partitions. # As a result the topic can be created with the wrong number of partitions. - faillint -paths \ + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths \ "github.com/twmb/franz-go/pkg/kgo.{AllowAutoTopicCreation}" \ ./pkg/... ./cmd/... ./tools/... ./integration/... # We don't use opentracing anymore. - faillint -paths \ + GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths \ "github.com/opentracing/opentracing-go,github.com/opentracing/opentracing-go/log,github.com/uber/jaeger-client-go,github.com/opentracing-contrib/go-stdlib/nethttp" \ ./pkg/... ./cmd/... ./tools/... ./integration/... @@ -485,12 +485,12 @@ print-go-version: ## Print the go version. @go version | awk '{print $$3}' | sed 's/go//' test-with-race: ## Run all unit tests with data race detect. - go test -tags netgo,stringlabels -timeout 30m -race -count 1 ./... + go test -tags netgo,stringlabels,localvalidationscheme -timeout 30m -race -count 1 ./... cover: ## Run all unit tests with code coverage and generates reports. $(eval COVERDIR := $(shell mktemp -d coverage.XXXXXXXXXX)) $(eval COVERFILE := $(shell mktemp $(COVERDIR)/unit.XXXXXXXXXX)) - go test -tags netgo,stringlabels -timeout 30m -race -count 1 -coverprofile=$(COVERFILE) ./... + go test -tags netgo,stringlabels,localvalidationscheme -timeout 30m -race -count 1 -coverprofile=$(COVERFILE) ./... go tool cover -html=$(COVERFILE) -o cover.html go tool cover -func=cover.html | tail -n1 @@ -778,12 +778,12 @@ check-mimir-read-write-mode-docker-compose-yaml: ## Check the jsonnet and docker integration-tests: ## Run all integration tests. integration-tests: cmd/mimir/$(UPTODATE) - go test -tags=requires_docker,stringlabels ./integration/... + go test -tags=requires_docker,stringlabels,localvalidationscheme ./integration/... 
integration-tests-race: ## Run all integration tests with race-enabled distroless docker image. integration-tests-race: export MIMIR_IMAGE=$(IMAGE_PREFIX)mimir:$(IMAGE_TAG_RACE) integration-tests-race: cmd/mimir/$(UPTODATE_RACE) - go test -timeout 30m -tags=requires_docker,stringlabels ./integration/... + go test -timeout 30m -tags=requires_docker,stringlabels,localvalidationscheme ./integration/... # Those vars are needed for packages target export VERSION diff --git a/development/mimir-ingest-storage/compose-up.sh b/development/mimir-ingest-storage/compose-up.sh index b3d9d1c7a13..c0018b92b8d 100755 --- a/development/mimir-ingest-storage/compose-up.sh +++ b/development/mimir-ingest-storage/compose-up.sh @@ -20,7 +20,7 @@ cd "$SCRIPT_DIR" && make # -gcflags "all=-N -l" disables optimizations that allow for better run with combination with Delve debugger. # GOARCH is not changed. -CGO_ENABLED=0 GOOS=linux go build -mod=vendor -tags=netgo,stringlabels -gcflags "all=-N -l" -o "${SCRIPT_DIR}"/mimir "${SCRIPT_DIR}"/../../cmd/mimir +CGO_ENABLED=0 GOOS=linux go build -mod=vendor -tags=netgo,stringlabels,localvalidationscheme -gcflags "all=-N -l" -o "${SCRIPT_DIR}"/mimir "${SCRIPT_DIR}"/../../cmd/mimir docker_compose -f "${SCRIPT_DIR}"/docker-compose.yml build --build-arg BUILD_IMAGE="${BUILD_IMAGE}" mimir-write-zone-a-0 docker_compose -f "${SCRIPT_DIR}"/docker-compose.yml up "$@" diff --git a/development/mimir-microservices-mode/compose-up.sh b/development/mimir-microservices-mode/compose-up.sh index 63d184f6026..b69e4fe43ae 100755 --- a/development/mimir-microservices-mode/compose-up.sh +++ b/development/mimir-microservices-mode/compose-up.sh @@ -22,12 +22,12 @@ cd "$SCRIPT_DIR" && make # -gcflags "all=-N -l" disables optimizations that allow for better run with combination with Delve debugger. # GOARCH is not changed. 
-CGO_ENABLED=0 GOOS=linux go build -mod=vendor -tags=netgo,stringlabels -gcflags "all=-N -l" -o "${SCRIPT_DIR}"/mimir "${SCRIPT_DIR}"/../../cmd/mimir +CGO_ENABLED=0 GOOS=linux go build -mod=vendor -tags=netgo,stringlabels,localvalidationscheme -gcflags "all=-N -l" -o "${SCRIPT_DIR}"/mimir "${SCRIPT_DIR}"/../../cmd/mimir docker_compose -f "${SCRIPT_DIR}"/docker-compose.yml build --build-arg BUILD_IMAGE="${BUILD_IMAGE}" distributor-1 if [ "$(yq '.services.query-tee' "${SCRIPT_DIR}"/docker-compose.yml)" != "null" ]; then # If query-tee is enabled, build its binary and image as well. - CGO_ENABLED=0 GOOS=linux go build -mod=vendor -tags=netgo,stringlabels -gcflags "all=-N -l" -o "${SCRIPT_DIR}"/../../cmd/query-tee "${SCRIPT_DIR}"/../../cmd/query-tee + CGO_ENABLED=0 GOOS=linux go build -mod=vendor -tags=netgo,stringlabels,localvalidationscheme -gcflags "all=-N -l" -o "${SCRIPT_DIR}"/../../cmd/query-tee "${SCRIPT_DIR}"/../../cmd/query-tee docker_compose -f "${SCRIPT_DIR}"/docker-compose.yml build --build-arg BUILD_IMAGE="${BUILD_IMAGE}" query-tee fi diff --git a/development/mimir-monolithic-mode/compose-up.sh b/development/mimir-monolithic-mode/compose-up.sh index e0bc61bf838..9c01d8948eb 100755 --- a/development/mimir-monolithic-mode/compose-up.sh +++ b/development/mimir-monolithic-mode/compose-up.sh @@ -42,7 +42,7 @@ fi # -gcflags "all=-N -l" disables optimizations that allow for better run with combination with Delve debugger. # GOARCH is not changed. 
-CGO_ENABLED=0 GOOS=linux go build -mod=vendor -tags=netgo,stringlabels -gcflags "all=-N -l" -o "${SCRIPT_DIR}"/mimir "${SCRIPT_DIR}"/../../cmd/mimir +CGO_ENABLED=0 GOOS=linux go build -mod=vendor -tags=netgo,stringlabels,localvalidationscheme -gcflags "all=-N -l" -o "${SCRIPT_DIR}"/mimir "${SCRIPT_DIR}"/../../cmd/mimir docker_compose -f "${SCRIPT_DIR}"/docker-compose.yml build mimir-1 && \ docker_compose -f "${SCRIPT_DIR}"/docker-compose.yml "${PROFILES[@]}" up "${ARGS[@]}" diff --git a/go.mod b/go.mod index 73cff470d74..e2784933aa9 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,7 @@ require ( github.com/prometheus/alertmanager v0.28.1 github.com/prometheus/client_golang v1.23.0-rc.1 github.com/prometheus/client_model v0.6.2 - github.com/prometheus/common v0.65.1-0.20250711183725-0e1982f10d4c + github.com/prometheus/common v0.65.1-0.20250714091050-c6ae72fb63e9 github.com/prometheus/prometheus v1.99.0 github.com/segmentio/fasthash v1.0.3 github.com/sirupsen/logrus v1.9.3 @@ -202,7 +202,7 @@ require ( go.opentelemetry.io/otel/log v0.12.2 // indirect go.opentelemetry.io/otel/log/logtest v0.0.0-20250528051624-65b8067f18f1 // indirect go.opentelemetry.io/otel/sdk/log v0.12.2 // indirect - go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect gopkg.in/mail.v2 v2.3.1 // indirect gopkg.in/telebot.v3 v3.2.1 // indirect @@ -345,6 +345,9 @@ require ( replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v1.8.2-0.20250725113505-6dd7af9abc56 +// https://github.com/grafana/prometheus-alertmanager/pull/118 +replace github.com/prometheus/alertmanager => github.com/juliusmh/alertmanager v0.26.1-0.20250624114102-96969065f8d2 + // Replace memberlist with our fork which includes some fixes that haven't been // merged upstream yet: // - https://github.com/hashicorp/memberlist/pull/260 @@ -369,12 +372,15 @@ replace 
github.com/opentracing-contrib/go-stdlib => github.com/grafana/opentraci // Replace opentracing-contrib/go-grpc with a fork until https://github.com/opentracing-contrib/go-grpc/pull/16 is merged. replace github.com/opentracing-contrib/go-grpc => github.com/charleskorn/go-grpc v0.0.0-20231024023642-e9298576254f -// Replacing prometheus/alertmanager with our fork. -replace github.com/prometheus/alertmanager => github.com/grafana/prometheus-alertmanager v0.25.1-0.20250620093340-be61a673dee6 - // Use Mimir fork of prometheus/otlptranslator to allow for higher velocity of upstream development, // while allowing Mimir to move at a more conservative pace. replace github.com/prometheus/otlptranslator => github.com/grafana/mimir-otlptranslator v0.0.0-20250703083430-c31a9568ad96 // Replace objstore with a fork containing https://github.com/thanos-io/objstore/pull/181. replace github.com/thanos-io/objstore => github.com/charleskorn/objstore v0.0.0-20250527065533-21d4c0c463eb + +replace go.opentelemetry.io/contrib/bridges/prometheus => github.com/aknuds1/opentelemetry-go-contrib/bridges/prometheus v0.0.0-20250716061915-e4a04e1efdd8 + +replace go.opentelemetry.io/otel/exporters/prometheus => github.com/aknuds1/opentelemetry-go/exporters/prometheus v0.0.0-20250714105753-6d10dabef4d5 + +replace go.opentelemetry.io/otel => github.com/aknuds1/opentelemetry-go v0.0.0-20250714105753-6d10dabef4d5 diff --git a/go.sum b/go.sum index ba49f67276c..4bf8b488e99 100644 --- a/go.sum +++ b/go.sum @@ -126,6 +126,12 @@ github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Sereal/Sereal/Go/sereal v0.0.0-20231009093132-b9187f1a92c6/go.mod h1:JwrycNnC8+sZPDyzM3MQ86LvaGzSpfxg885KOOwFRW4= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/aknuds1/opentelemetry-go v0.0.0-20250714105753-6d10dabef4d5 
h1:VmoJU/HB75Anuqd6lSeTz+7oROgFsFC/8BRjK7b/Vto= +github.com/aknuds1/opentelemetry-go v0.0.0-20250714105753-6d10dabef4d5/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +github.com/aknuds1/opentelemetry-go-contrib/bridges/prometheus v0.0.0-20250716061915-e4a04e1efdd8 h1:J6DHtSSL7iCTwCyYASS9E/ee2jEAHvhPn3ndpyt1hWU= +github.com/aknuds1/opentelemetry-go-contrib/bridges/prometheus v0.0.0-20250716061915-e4a04e1efdd8/go.mod h1:99W6ktc5NGVXAJ6atj2cZ/oZTVyDdHinFf60q13ktL8= +github.com/aknuds1/opentelemetry-go/exporters/prometheus v0.0.0-20250714105753-6d10dabef4d5 h1:NfkvUV/EKTJ8LoX411bFXTXMNJKqgWd2lT81Nn5nRLY= +github.com/aknuds1/opentelemetry-go/exporters/prometheus v0.0.0-20250714105753-6d10dabef4d5/go.mod h1:R8GpRXTZrqvXHDEGVH5bF6+JqAZcK8PjJcZ5nGhEWiE= github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= github.com/alecthomas/chroma/v2 v2.19.0 h1:Im+SLRgT8maArxv81mULDWN8oKxkzboH07CHesxElq4= @@ -577,8 +583,6 @@ github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/grafana/otel-profiling-go v0.5.1 h1:stVPKAFZSa7eGiqbYuG25VcqYksR6iWvF3YH66t4qL8= github.com/grafana/otel-profiling-go v0.5.1/go.mod h1:ftN/t5A/4gQI19/8MoWurBEtC6gFw8Dns1sJZ9W4Tls= -github.com/grafana/prometheus-alertmanager v0.25.1-0.20250620093340-be61a673dee6 h1:oJnbhG6ZNy10AjsgNeAtAKeGHogIGOMfAsBH6fYYa5M= -github.com/grafana/prometheus-alertmanager v0.25.1-0.20250620093340-be61a673dee6/go.mod h1:O/QP1BCm0HHIzbKvgMzqb5sSyH88rzkFk84F4TfJjBU= github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKtUuKQbJqgAIjlnicKg= github.com/grafana/pyroscope-go/godeltaprof v0.1.8/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= github.com/grafana/regexp v0.0.0-20240531075221-3685f1377d7b 
h1:oMAq12GxTpwo9jxbnG/M4F/HdpwbibTaVoxNA0NZprY= @@ -713,6 +717,8 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/juliusmh/alertmanager v0.26.1-0.20250624114102-96969065f8d2 h1:J2VAb425QgLA/NEI/GK/jksDaiTqHQQPEK7mZ+LEQNI= +github.com/juliusmh/alertmanager v0.26.1-0.20250624114102-96969065f8d2/go.mod h1:O/QP1BCm0HHIzbKvgMzqb5sSyH88rzkFk84F4TfJjBU= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU= github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k= @@ -872,6 +878,7 @@ github.com/pires/go-proxyproto v0.7.0 h1:IukmRewDQFWC7kfnb66CSomk2q/seBuilHBYFwy github.com/pires/go-proxyproto v0.7.0/go.mod h1:Vz/1JPY/OACxWGQNIRY2BeyDmpoaWmEP40O9LbuiFR4= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -906,8 +913,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod 
h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.65.1-0.20250711183725-0e1982f10d4c h1:LfxkKdkGF+3fC5ZiHv5sWtEMH+STn+Edwx78s+W95QU= -github.com/prometheus/common v0.65.1-0.20250711183725-0e1982f10d4c/go.mod h1:LL3lcZII3UXGO4InbF+BTSsiAAPUBnwFVbp4gBWIMqw= +github.com/prometheus/common v0.65.1-0.20250714091050-c6ae72fb63e9 h1:W+mk95PFPPi5NOzr2MtiGe7BXlHmsxs7UESIGsW5S08= +github.com/prometheus/common v0.65.1-0.20250714091050-c6ae72fb63e9/go.mod h1:41VB7D5p4TG2i2w5P4G62ofoS2mVyeTQ9QIAKYE60TE= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg= @@ -930,6 +937,7 @@ github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= @@ -1107,8 +1115,6 @@ go.opentelemetry.io/collector/semconv v0.128.0 h1:MzYOz7Vgb3Kf5D7b49pqqgeUhEmOCu go.opentelemetry.io/collector/semconv v0.128.0/go.mod h1:OPXer4l43X23cnjLXIZnRj/qQOjSuq4TgBLI76P9hns= go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 h1:u2E32P7j1a/gRgZDWhIXC+Shd4rLg70mnE7QLI/Ssnw= go.opentelemetry.io/contrib/bridges/otelzap v0.11.0/go.mod 
h1:pJPCLM8gzX4ASqLlyAXjHBEYxgbOQJ/9bidWxD6PEPQ= -go.opentelemetry.io/contrib/bridges/prometheus v0.61.0 h1:RyrtJzu5MAmIcbRrwg75b+w3RlZCP0vJByDVzcpAe3M= -go.opentelemetry.io/contrib/bridges/prometheus v0.61.0/go.mod h1:tirr4p9NXbzjlbruiRGp53IzlYrDk5CO2fdHj0sSSaY= go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw= go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k= go.opentelemetry.io/contrib/exporters/autoexport v0.61.0 h1:XfzKtKSrbtYk9TNCF8dkO0Y9M7IOfb4idCwBOTwGBiI= @@ -1123,9 +1129,6 @@ go.opentelemetry.io/contrib/propagators/jaeger v1.35.0 h1:UIrZgRBHUrYRlJ4V419lVb go.opentelemetry.io/contrib/propagators/jaeger v1.35.0/go.mod h1:0ciyFyYZxE6JqRAQvIgGRabKWDUmNdW3GAQb6y/RlFU= go.opentelemetry.io/contrib/samplers/jaegerremote v0.30.0 h1:bQ1Gvah4Sp8z7epSkgJaNTuZm7sutfA6Fji2/7cKFMc= go.opentelemetry.io/contrib/samplers/jaegerremote v0.30.0/go.mod h1:9b8Q9rH52NgYH3ShiTFB5wf18Vt3RTH/VMB7LDcC1ug= -go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= -go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= -go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 h1:06ZeJRe5BnYXceSM9Vya83XXVaNGe3H1QqsvqRANQq8= @@ -1142,8 +1145,6 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 h1:JgtbA go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0/go.mod h1:179AK5aar5R3eS9FucPy6rggvU0g52cvKId8pv4+v0c= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp 
v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ= -go.opentelemetry.io/otel/exporters/prometheus v0.58.0 h1:CJAxWKFIqdBennqxJyOgnt5LqkeFRT+Mz3Yjz3hL+h8= -go.opentelemetry.io/otel/exporters/prometheus v0.58.0/go.mod h1:7qo/4CLI+zYSNbv0GMNquzuss2FVZo3OYrGh96n4HNc= go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2 h1:12vMqzLLNZtXuXbJhSENRg+Vvx+ynNilV8twBLBsXMY= go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2/go.mod h1:ZccPZoPOoq8x3Trik/fCsba7DEYDUnN6yX79pgp2BUQ= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY= @@ -1164,9 +1165,10 @@ go.opentelemetry.io/otel/sdk/log v0.12.2 h1:yNoETvTByVKi7wHvYS6HMcZrN5hFLD7I++1x go.opentelemetry.io/otel/sdk/log v0.12.2/go.mod h1:DcpdmUXHJgSqN/dh+XMWa7Vf89u9ap0/AAk/XGLnEzY= go.opentelemetry.io/otel/sdk/log/logtest v0.0.0-20250521073539-a85ae98dcedc h1:uqxdywfHqqCl6LmZzI3pUnXT1RGFYyUgxj0AkWPFxi0= go.opentelemetry.io/otel/sdk/log/logtest v0.0.0-20250521073539-a85ae98dcedc/go.mod h1:TY/N/FT7dmFrP/r5ym3g0yysP1DefqGpAZr4f82P0dE= -go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= -go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -1197,6 +1199,9 @@ golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod 
h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1245,6 +1250,9 @@ golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1302,6 +1310,9 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= 
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1342,6 +1353,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1441,8 +1454,13 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= golang.org/x/sys v0.33.0/go.mod 
h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= +golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= @@ -1450,6 +1468,9 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1465,6 +1486,9 @@ golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0/go.mod 
h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1534,6 +1558,8 @@ golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/integration/kv_test.go b/integration/kv_test.go index cf9b725919e..2fb1151b029 100644 --- a/integration/kv_test.go +++ b/integration/kv_test.go @@ -9,7 +9,7 @@ package integration import ( "context" "errors" - "sort" + "slices" "sync" "testing" "time" @@ -40,7 +40,7 @@ func TestKVList(t *testing.T) { // Get list of keys and sort them keys, err := client.List(context.Background(), "") require.NoError(t, err, "could not list keys") - sort.Strings(keys) + slices.Sort(keys) require.Equal(t, keysToCreate, keys, "returned key paths did not match created paths") verifyClientMetricsHistogram(t, reg, "cortex_kv_request_duration_seconds", map[string]uint64{ diff --git a/pkg/cardinality/request.go b/pkg/cardinality/request.go index b42db2e5984..3d837c54039 100644 --- a/pkg/cardinality/request.go +++ b/pkg/cardinality/request.go @@ -263,7 +263,7 @@ func 
extractLabelNames(values url.Values) ([]model.LabelName, error) { labelNames := make([]model.LabelName, 0, len(labelNamesParams)) for _, labelNameParam := range labelNamesParams { labelName := model.LabelName(labelNameParam) - if !labelName.IsValid() { + if !labelName.IsValid(model.UTF8Validation) { return nil, fmt.Errorf("invalid 'label_names' param '%v'", labelNameParam) } labelNames = append(labelNames, labelName) diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index e3d124e9c54..c8c48f3e2d8 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -66,12 +66,6 @@ import ( var tracer = otel.Tracer("pkg/distributor") -func init() { - // Mimir doesn't support Prometheus' UTF-8 metric/label name scheme yet. - // nolint:staticcheck - model.NameValidationScheme = model.LegacyValidation -} - var ( // Validation errors. errInvalidTenantShardSize = errors.New("invalid tenant shard size, the value must be greater than or equal to zero") @@ -1098,15 +1092,18 @@ func (d *Distributor) prePushRelabelMiddleware(next PushFunc) PushFunc { return err } + dropLabels := d.limits.DropLabels(userID) + relabelConfigs := d.limits.MetricRelabelConfigs(userID) + var removeTsIndexes []int lb := labels.NewBuilder(labels.EmptyLabels()) for tsIdx := 0; tsIdx < len(req.Timeseries); tsIdx++ { ts := req.Timeseries[tsIdx] - if mrc := d.limits.MetricRelabelConfigs(userID); len(mrc) > 0 { + if len(relabelConfigs) > 0 { mimirpb.FromLabelAdaptersToBuilder(ts.Labels, lb) lb.Set(metaLabelTenantID, userID) - keep := relabel.ProcessBuilder(lb, mrc...) + keep := relabel.ProcessBuilder(lb, relabelConfigs...) 
if !keep { removeTsIndexes = append(removeTsIndexes, tsIdx) continue @@ -1115,7 +1112,7 @@ func (d *Distributor) prePushRelabelMiddleware(next PushFunc) PushFunc { req.Timeseries[tsIdx].SetLabels(mimirpb.FromBuilderToLabelAdapters(lb, ts.Labels)) } - for _, labelName := range d.limits.DropLabels(userID) { + for _, labelName := range dropLabels { req.Timeseries[tsIdx].RemoveLabel(labelName) } diff --git a/pkg/distributor/validate.go b/pkg/distributor/validate.go index 54f2d205f6c..8a52f43cee0 100644 --- a/pkg/distributor/validate.go +++ b/pkg/distributor/validate.go @@ -390,6 +390,7 @@ type labelValidationConfig interface { MaxLabelNamesPerInfoSeries(userID string) int MaxLabelNameLength(userID string) int MaxLabelValueLength(userID string) int + ValidationScheme(userID string) model.ValidationScheme } func removeNonASCIIChars(in string) (out string) { @@ -421,7 +422,9 @@ func validateLabels(m *sampleValidationMetrics, cfg labelValidationConfig, userI return errors.New(noMetricNameMsgFormat) } - if !model.IsValidMetricName(model.LabelValue(unsafeMetricName)) { + validationScheme := cfg.ValidationScheme(userID) + + if !model.IsValidMetricName(model.LabelValue(unsafeMetricName), validationScheme) { cat.IncrementDiscardedSamples(ls, 1, reasonInvalidMetricName, ts) m.invalidMetricName.WithLabelValues(userID, group).Inc() return fmt.Errorf(invalidMetricNameMsgFormat, removeNonASCIIChars(unsafeMetricName)) @@ -447,7 +450,7 @@ func validateLabels(m *sampleValidationMetrics, cfg labelValidationConfig, userI maxLabelValueLength := cfg.MaxLabelValueLength(userID) lastLabelName := "" for _, l := range ls { - if !skipLabelValidation && !model.LabelName(l.Name).IsValid() { + if !skipLabelValidation && !model.LabelName(l.Name).IsValid(validationScheme) { m.invalidLabel.WithLabelValues(userID, group).Inc() cat.IncrementDiscardedSamples(ls, 1, reasonInvalidLabel, ts) return fmt.Errorf(invalidLabelMsgFormat, l.Name, mimirpb.FromLabelAdaptersToString(ls)) diff --git 
a/pkg/distributor/validate_test.go b/pkg/distributor/validate_test.go index 9945cedf2c0..4880476f811 100644 --- a/pkg/distributor/validate_test.go +++ b/pkg/distributor/validate_test.go @@ -37,6 +37,7 @@ type validateLabelsCfg struct { maxLabelNamesPerInfoSeries int maxLabelNameLength int maxLabelValueLength int + validationScheme model.ValidationScheme } func (v validateLabelsCfg) MaxLabelNamesPerSeries(_ string) int { @@ -55,6 +56,10 @@ func (v validateLabelsCfg) MaxLabelValueLength(_ string) int { return v.maxLabelValueLength } +func (v validateLabelsCfg) ValidationScheme(_ string) model.ValidationScheme { + return v.validationScheme +} + type validateMetadataCfg struct { enforceMetadataMetricName bool maxMetadataLength int @@ -73,13 +78,19 @@ func TestValidateLabels(t *testing.T) { reg := prometheus.NewPedanticRegistry() s := newSampleValidationMetrics(reg) - var cfg validateLabelsCfg - userID := "testUser" + const userID = "testUser" + + legacyConfig := validateLabelsCfg{ + maxLabelNamesPerSeries: 3, + maxLabelNamesPerInfoSeries: 4, + maxLabelNameLength: 25, + maxLabelValueLength: 25, + validationScheme: model.LegacyValidation, + } + + utf8Config := legacyConfig + utf8Config.validationScheme = model.UTF8Validation - cfg.maxLabelValueLength = 25 - cfg.maxLabelNameLength = 25 - cfg.maxLabelNamesPerSeries = 3 - cfg.maxLabelNamesPerInfoSeries = 4 limits := catestutils.NewMockCostAttributionLimits(0, userID, "team") careg := prometheus.NewRegistry() manager, err := costattribution.NewManager(5*time.Second, 10*time.Second, log.NewNopLogger(), limits, reg, careg) @@ -90,30 +101,35 @@ func TestValidateLabels(t *testing.T) { metric model.Metric skipLabelNameValidation bool skipLabelCountValidation bool + config validateLabelsCfg err error }{ { metric: map[model.LabelName]model.LabelValue{"team": "a"}, skipLabelNameValidation: false, skipLabelCountValidation: false, + config: legacyConfig, err: errors.New(noMetricNameMsgFormat), }, { metric: 
map[model.LabelName]model.LabelValue{model.MetricNameLabel: " ", "team": "a"}, skipLabelNameValidation: false, skipLabelCountValidation: false, + config: legacyConfig, err: fmt.Errorf(invalidMetricNameMsgFormat, " "), }, { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "metric_name_with_\xb0_invalid_utf8_\xb0", "team": "a"}, skipLabelNameValidation: false, skipLabelCountValidation: false, + config: legacyConfig, err: fmt.Errorf(invalidMetricNameMsgFormat, "metric_name_with__invalid_utf8_ (non-ascii characters removed)"), }, { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "valid", "foo ": "bar", "team": "a"}, skipLabelNameValidation: false, skipLabelCountValidation: false, + config: legacyConfig, err: fmt.Errorf( invalidLabelMsgFormat, "foo ", @@ -130,12 +146,14 @@ func TestValidateLabels(t *testing.T) { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "valid", "team": "c"}, skipLabelNameValidation: false, skipLabelCountValidation: false, + config: legacyConfig, err: nil, }, { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "badLabelName", "this_is_a_really_really_long_name_that_should_cause_an_error": "test_value_please_ignore", "team": "biz"}, skipLabelNameValidation: false, skipLabelCountValidation: false, + config: legacyConfig, err: fmt.Errorf( labelNameTooLongMsgFormat, "this_is_a_really_really_long_name_that_should_cause_an_error", @@ -152,6 +170,7 @@ func TestValidateLabels(t *testing.T) { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "badLabelValue", "much_shorter_name": "test_value_please_ignore_no_really_nothing_to_see_here", "team": "biz"}, skipLabelNameValidation: false, skipLabelCountValidation: false, + config: legacyConfig, err: LabelValueTooLongError{ Label: mimirpb.LabelAdapter{Name: "much_shorter_name", Value: "test_value_please_ignore_no_really_nothing_to_see_here"}, Limit: 25, @@ -166,6 +185,7 @@ func TestValidateLabels(t *testing.T) { metric: 
map[model.LabelName]model.LabelValue{model.MetricNameLabel: "foo", "bar": "baz", "blip": "blop", "team": "plof"}, skipLabelNameValidation: false, skipLabelCountValidation: false, + config: legacyConfig, err: fmt.Errorf( tooManyLabelsMsgFormat, tooManyLabelsArgs( @@ -184,6 +204,7 @@ func TestValidateLabels(t *testing.T) { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "foo_info", "bar": "baz", "blip": "blop", "team": "a"}, skipLabelNameValidation: false, skipLabelCountValidation: false, + config: legacyConfig, err: nil, }, { @@ -191,6 +212,7 @@ func TestValidateLabels(t *testing.T) { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "foo_info", "bar": "baz", "blip": "blop", "blap": "blup", "team": "a"}, skipLabelNameValidation: false, skipLabelCountValidation: false, + config: legacyConfig, err: fmt.Errorf( tooManyInfoLabelsMsgFormat, tooManyLabelsArgs( @@ -209,24 +231,28 @@ func TestValidateLabels(t *testing.T) { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "foo", "bar": "baz", "blip": "blop", "team": "a"}, skipLabelNameValidation: false, skipLabelCountValidation: true, + config: legacyConfig, err: nil, }, { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "foo", "invalid%label&name": "bar", "team": "biz"}, skipLabelNameValidation: true, skipLabelCountValidation: false, + config: legacyConfig, err: nil, }, { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "foo", "label1": "你好", "team": "plof"}, skipLabelNameValidation: false, skipLabelCountValidation: false, + config: legacyConfig, err: nil, }, { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "foo", "label1": "abc\xfe\xfddef", "team": "plof"}, skipLabelNameValidation: false, skipLabelCountValidation: false, + config: legacyConfig, err: fmt.Errorf( invalidLabelValueMsgFormat, "label1", "abc\ufffddef", "foo", @@ -236,10 +262,34 @@ func TestValidateLabels(t *testing.T) { metric: 
map[model.LabelName]model.LabelValue{model.MetricNameLabel: "foo", "label1": "abc\xfe\xfddef"}, skipLabelNameValidation: true, skipLabelCountValidation: false, + config: legacyConfig, err: nil, }, + { + metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "foo", "name😀": "value", "team": "b"}, + skipLabelNameValidation: false, + skipLabelCountValidation: false, + config: legacyConfig, + err: fmt.Errorf( + invalidLabelMsgFormat, + "name😀", + mimirpb.FromLabelAdaptersToString( + []mimirpb.LabelAdapter{ + {Name: model.MetricNameLabel, Value: "foo"}, + {Name: "name😀", Value: "value"}, + {Name: "team", Value: "b"}, + }, + ), + ), + }, + { + metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "foo", "name😀": "value", "team": "b"}, + skipLabelNameValidation: false, + skipLabelCountValidation: false, + config: utf8Config, + }, } { - err := validateLabels(s, cfg, userID, "custom label", mimirpb.FromMetricsToLabelAdapters(c.metric), c.skipLabelNameValidation, c.skipLabelCountValidation, cast, ts) + err := validateLabels(s, c.config, userID, "custom label", mimirpb.FromMetricsToLabelAdapters(c.metric), c.skipLabelNameValidation, c.skipLabelCountValidation, cast, ts) assert.Equal(t, c.err, err, "wrong error") } @@ -249,7 +299,7 @@ func TestValidateLabels(t *testing.T) { require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(` # HELP cortex_discarded_samples_total The total number of samples that were discarded. 
# TYPE cortex_discarded_samples_total counter - cortex_discarded_samples_total{group="custom label",reason="label_invalid",user="testUser"} 1 + cortex_discarded_samples_total{group="custom label",reason="label_invalid",user="testUser"} 2 cortex_discarded_samples_total{group="custom label",reason="label_name_too_long",user="testUser"} 1 cortex_discarded_samples_total{group="custom label",reason="label_value_invalid",user="testUser"} 1 cortex_discarded_samples_total{group="custom label",reason="label_value_too_long",user="testUser"} 1 @@ -264,6 +314,7 @@ func TestValidateLabels(t *testing.T) { # HELP cortex_discarded_attributed_samples_total The total number of samples that were discarded per attribution. # TYPE cortex_discarded_attributed_samples_total counter cortex_discarded_attributed_samples_total{reason="label_invalid",team="a",tenant="testUser",tracker="cost-attribution"} 1 + cortex_discarded_attributed_samples_total{reason="label_invalid",team="b",tenant="testUser",tracker="cost-attribution"} 1 cortex_discarded_attributed_samples_total{reason="label_name_too_long",team="biz",tenant="testUser",tracker="cost-attribution"} 1 cortex_discarded_attributed_samples_total{reason="label_value_invalid",team="plof",tenant="testUser",tracker="cost-attribution"} 1 cortex_discarded_attributed_samples_total{reason="label_value_too_long",team="biz",tenant="testUser",tracker="cost-attribution"} 1 @@ -451,6 +502,7 @@ func TestValidateLabelDuplication(t *testing.T) { cfg.maxLabelNameLength = 10 cfg.maxLabelNamesPerSeries = 10 cfg.maxLabelValueLength = 10 + cfg.validationScheme = model.LegacyValidation userID := "testUser" actual := validateLabels(newSampleValidationMetrics(nil), cfg, userID, "", []mimirpb.LabelAdapter{ diff --git a/pkg/frontend/querymiddleware/error_caching.go b/pkg/frontend/querymiddleware/error_caching.go index 417bb86bf23..cb5cf3a80bf 100644 --- a/pkg/frontend/querymiddleware/error_caching.go +++ b/pkg/frontend/querymiddleware/error_caching.go @@ -15,6 +15,7 
@@ import ( "github.com/grafana/dskit/tracing" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/common/model" apierror "github.com/grafana/mimir/pkg/api/error" "github.com/grafana/mimir/pkg/util/spanlogger" @@ -196,7 +197,7 @@ func (e *errorCachingHandler) isCacheable(apiErr *apierror.APIError) (bool, stri func addWithExemplar(ctx context.Context, counter prometheus.Counter, val float64) { if traceID, traceOK := tracing.ExtractSampledTraceID(ctx); traceOK { - counter.(prometheus.ExemplarAdder).AddWithExemplar(val, prometheus.Labels{"trace_id": traceID, "traceID": traceID}) + counter.(prometheus.ExemplarAdder).AddWithExemplar(val, prometheus.Labels{"trace_id": traceID, "traceID": traceID}, model.UTF8Validation) } else { // If there is no trace ID, just add to the counter. counter.Add(val) diff --git a/pkg/frontend/querymiddleware/request_validation.go b/pkg/frontend/querymiddleware/request_validation.go index 6db11683dc0..3d05c5f2f74 100644 --- a/pkg/frontend/querymiddleware/request_validation.go +++ b/pkg/frontend/querymiddleware/request_validation.go @@ -7,15 +7,8 @@ import ( "net/http" "github.com/grafana/dskit/cancellation" - "github.com/prometheus/common/model" ) -func init() { - // Mimir doesn't support Prometheus' UTF-8 metric/label name scheme yet. 
- // nolint:staticcheck - model.NameValidationScheme = model.LegacyValidation -} - const requestValidationFailedFmt = "request validation failed for " var errMetricsQueryRequestValidationFailed = cancellation.NewErrorf( diff --git a/pkg/frontend/querymiddleware/request_validation_test.go b/pkg/frontend/querymiddleware/request_validation_test.go index 7d32570f0d5..54e0d9234af 100644 --- a/pkg/frontend/querymiddleware/request_validation_test.go +++ b/pkg/frontend/querymiddleware/request_validation_test.go @@ -208,9 +208,9 @@ func TestCardinalityQueryRequestValidationRoundTripper(t *testing.T) { expectedErrType: apierror.TypeBadData, }, { - // non-utf8 label name will be rejected even when we transition to UTF-8 label names + // non-legacy label name will be accepted url: cardinalityLabelValuesPathSuffix + "?label_names[]=\\xbd\\xb2\\x3d\\xbc\\x20\\xe2\\x8c\\x98", - expectedErrType: apierror.TypeBadData, + expectedErrType: "", }, { url: cardinalityLabelValuesPathSuffix + "?label_names[]=foo", diff --git a/pkg/mimir/modules.go b/pkg/mimir/modules.go index 7d732e165d6..4b476d7b87e 100644 --- a/pkg/mimir/modules.go +++ b/pkg/mimir/modules.go @@ -821,6 +821,8 @@ func (t *Mimir) initQueryFrontendTripperware() (serv services.Service, err error panic(fmt.Sprintf("invalid config not caught by validation: unknown PromQL engine '%s'", t.Cfg.Querier.QueryEngine)) } + eng = streamingpromqlcompat.NameValidatingEngine(eng, t.Overrides) + tripperware, err := querymiddleware.NewTripperware( t.Cfg.Frontend.QueryMiddleware, util_log.Logger, diff --git a/pkg/mimir/promexts.go b/pkg/mimir/promexts.go index cabc1122543..79356aaf6d9 100644 --- a/pkg/mimir/promexts.go +++ b/pkg/mimir/promexts.go @@ -3,14 +3,9 @@ package mimir import ( - "github.com/prometheus/common/model" - "github.com/grafana/mimir/pkg/util/promqlext" ) func init() { promqlext.ExtendPromQL() - // Mimir doesn't support Prometheus' UTF-8 metric/label name scheme yet. 
- // nolint:staticcheck - model.NameValidationScheme = model.LegacyValidation } diff --git a/pkg/mimirtool/rules/rules.go b/pkg/mimirtool/rules/rules.go index 71de931eb76..df03ef620a8 100644 --- a/pkg/mimirtool/rules/rules.go +++ b/pkg/mimirtool/rules/rules.go @@ -9,6 +9,7 @@ import ( "fmt" "strings" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/rulefmt" "github.com/prometheus/prometheus/promql/parser" log "github.com/sirupsen/logrus" @@ -255,7 +256,8 @@ func (r RuleNamespace) Validate(groupNodes []rulefmt.RuleGroupNode) []error { func ValidateRuleGroup(g rwrulefmt.RuleGroup, node rulefmt.RuleGroupNode) []error { var errs []error for i, r := range g.Rules { - for _, err := range r.Validate(node.Rules[i]) { + // TODO(juliusmh): fixed to legacy validation + for _, err := range r.Validate(node.Rules[i], model.LegacyValidation) { var ruleName string if r.Alert != "" { ruleName = r.Alert diff --git a/pkg/querier/cardinality_analysis_handler_test.go b/pkg/querier/cardinality_analysis_handler_test.go index 433cad44f03..9f41af82ca7 100644 --- a/pkg/querier/cardinality_analysis_handler_test.go +++ b/pkg/querier/cardinality_analysis_handler_test.go @@ -706,8 +706,8 @@ func TestLabelValuesCardinalityHandler_ParseError(t *testing.T) { expectedErrorMessage: "'label_names[]' param is required", }, "label_names param is invalid": { - url: "/label_values?label_names[]=olá", - expectedErrorMessage: "invalid 'label_names' param 'olá'", + url: "/label_values?label_names[]=\xff\xfe", + expectedErrorMessage: "invalid 'label_names' param '\xff\xfe'", }, "multiple selector params are provided": { url: "/label_values?label_names[]=hello&selector=foo&selector=bar", diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index b4d46f8b634..f7c752baca7 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -203,6 +203,7 @@ func New(cfg Config, limits *validation.Overrides, distributor Distributor, quer panic(fmt.Sprintf("invalid config not 
caught by validation: unknown PromQL engine '%s'", cfg.QueryEngine)) } + eng = compat.NameValidatingEngine(eng, limits) return NewSampleAndChunkQueryable(lazyQueryable), exemplarQueryable, eng, nil } diff --git a/pkg/querier/stats_renderer_test.go b/pkg/querier/stats_renderer_test.go index bc9b12f3716..761f13dda06 100644 --- a/pkg/querier/stats_renderer_test.go +++ b/pkg/querier/stats_renderer_test.go @@ -13,7 +13,6 @@ import ( "github.com/grafana/dskit/user" "github.com/grafana/regexp" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/prometheus/common/route" "github.com/prometheus/prometheus/config" @@ -26,12 +25,6 @@ import ( mimir_stats "github.com/grafana/mimir/pkg/querier/stats" ) -func init() { - // Mimir doesn't support Prometheus' UTF-8 metric/label name scheme yet. - // nolint:staticcheck - model.NameValidationScheme = model.LegacyValidation -} - func TestStatsRenderer(t *testing.T) { testCases := map[string]struct { diff --git a/pkg/ruler/api.go b/pkg/ruler/api.go index fa87e200ab0..c515205ccc1 100644 --- a/pkg/ruler/api.go +++ b/pkg/ruler/api.go @@ -668,7 +668,7 @@ func (a *API) CreateRuleGroup(w http.ResponseWriter, req *http.Request) { return } - errs := a.ruler.manager.ValidateRuleGroup(rg, node) + errs := a.ruler.manager.ValidateRuleGroup(userID, rg, node) if len(errs) > 0 { e := []string{} for _, err := range errs { diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index 3a001060e33..a18749f9369 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -15,6 +15,7 @@ import ( "github.com/grafana/dskit/user" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -216,6 +217,7 @@ type RulesLimits interface { 
RulerMaxIndependentRuleEvaluationConcurrencyPerTenant(userID string) int64 RulerAlertmanagerClientConfig(userID string) notifierCfg.AlertmanagerClientConfig RulerMinRuleEvaluationInterval(userID string) time.Duration + ValidationScheme(userID string) model.ValidationScheme } func MetricsQueryFunc(qf rules.QueryFunc, userID string, queries, failedQueries *prometheus.CounterVec, remoteQuerier bool) rules.QueryFunc { diff --git a/pkg/ruler/manager.go b/pkg/ruler/manager.go index d78a0a77b48..567a1834fab 100644 --- a/pkg/ruler/manager.go +++ b/pkg/ruler/manager.go @@ -416,7 +416,7 @@ func (r *DefaultMultiTenantManager) Stop() { r.mapper.cleanup() } -func (r *DefaultMultiTenantManager) ValidateRuleGroup(g rulefmt.RuleGroup, node rulefmt.RuleGroupNode) []error { +func (r *DefaultMultiTenantManager) ValidateRuleGroup(userID string, g rulefmt.RuleGroup, node rulefmt.RuleGroupNode) []error { var errs []error if g.Name == "" { @@ -439,8 +439,9 @@ func (r *DefaultMultiTenantManager) ValidateRuleGroup(g rulefmt.RuleGroup, node errs = append(errs, fmt.Errorf("invalid rules configuration: rule group '%s' has both query_offset and (deprecated) evaluation_delay set, but to different values; please remove the deprecated evaluation_delay and use query_offset instead", g.Name)) } + validationScheme := r.limits.ValidationScheme(userID) for i, r := range g.Rules { - for _, err := range r.Validate(node.Rules[i]) { + for _, err := range r.Validate(node.Rules[i], validationScheme) { var ruleName string if r.Alert != "" { ruleName = r.Alert diff --git a/pkg/ruler/ruler.go b/pkg/ruler/ruler.go index 13e7346659e..01e24c99e0e 100644 --- a/pkg/ruler/ruler.go +++ b/pkg/ruler/ruler.go @@ -291,7 +291,7 @@ type MultiTenantManager interface { Stop() // ValidateRuleGroup validates a rulegroup - ValidateRuleGroup(rulefmt.RuleGroup, rulefmt.RuleGroupNode) []error + ValidateRuleGroup(userID string, ruleGroup rulefmt.RuleGroup, ruleGroupNode rulefmt.RuleGroupNode) []error // Start evaluating rules. 
Start() diff --git a/pkg/streamingpromql/compat/name_validating_engine.go b/pkg/streamingpromql/compat/name_validating_engine.go new file mode 100644 index 00000000000..c1acdf190c9 --- /dev/null +++ b/pkg/streamingpromql/compat/name_validating_engine.go @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package compat + +import ( + "context" + "time" + + "github.com/grafana/dskit/tenant" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/storage" + + "github.com/grafana/mimir/pkg/util/validation" +) + +type nameValidatingEngine struct { + engine promql.QueryEngine + limits *validation.Overrides +} + +// NameValidatingEngine creates a new promql.QueryEngine that wraps engine and overrides query options +// with the name validation scheme from limits. +func NameValidatingEngine(engine promql.QueryEngine, limits *validation.Overrides) promql.QueryEngine { + return &nameValidatingEngine{engine: engine, limits: limits} +} + +type optsWithValidationScheme struct { + promql.QueryOpts + validationScheme model.ValidationScheme +} + +func (o optsWithValidationScheme) EnablePerStepStats() bool { + return o.QueryOpts.EnablePerStepStats() +} + +func (o optsWithValidationScheme) LookbackDelta() time.Duration { + return o.QueryOpts.LookbackDelta() +} + +func (o optsWithValidationScheme) ValidationScheme() model.ValidationScheme { + return o.validationScheme +} + +func (e nameValidatingEngine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) { + validationScheme, err := e.getValidationScheme(ctx) + if err != nil { + return nil, err + } + if opts == nil { + opts = promql.NewPrometheusQueryOpts(false, 0, model.UTF8Validation) + } + opts = &optsWithValidationScheme{ + QueryOpts: opts, + validationScheme: validationScheme, + } + return e.engine.NewInstantQuery(ctx, q, opts, qs, ts) +} + +func (e nameValidatingEngine) 
NewRangeQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) { + validationScheme, err := e.getValidationScheme(ctx) + if err != nil { + return nil, err + } + if opts == nil { + opts = promql.NewPrometheusQueryOpts(false, 0, model.UTF8Validation) + } + opts = &optsWithValidationScheme{ + QueryOpts: opts, + validationScheme: validationScheme, + } + return e.engine.NewRangeQuery(ctx, q, opts, qs, start, end, interval) +} + +// getValidationScheme retrieves the name validation scheme to use from a context containing tenant IDs. +// Returns legacy validation scheme if at least one tenant uses legacy validation. +func (e nameValidatingEngine) getValidationScheme(ctx context.Context) (model.ValidationScheme, error) { + tenantIDs, err := tenant.TenantIDs(ctx) + if err != nil { + return model.UnsetValidation, err + } + for _, tenantID := range tenantIDs { + if e.limits.ValidationScheme(tenantID) == model.LegacyValidation { + return model.LegacyValidation, nil + } + } + return model.UTF8Validation, nil +} diff --git a/pkg/streamingpromql/engine_test.go b/pkg/streamingpromql/engine_test.go index f12ad73e7e6..f5e8392cc81 100644 --- a/pkg/streamingpromql/engine_test.go +++ b/pkg/streamingpromql/engine_test.go @@ -22,6 +22,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" @@ -3213,7 +3214,7 @@ func TestQueryStats(t *testing.T) { runQueryAndGetSamplesStats := func(t *testing.T, engine promql.QueryEngine, expr string, isInstantQuery bool) *promstats.QuerySamples { var q promql.Query var err error - opts := promql.NewPrometheusQueryOpts(true, 0) + opts := 
promql.NewPrometheusQueryOpts(true, 0, model.LegacyValidation) if isInstantQuery { q, err = engine.NewInstantQuery(context.Background(), storage, opts, expr, end) } else { @@ -3507,7 +3508,7 @@ func TestQueryStatsUpstreamTestCases(t *testing.T) { runQueryAndGetSamplesStats := func(t *testing.T, engine promql.QueryEngine, expr string, start, end time.Time, interval time.Duration) *promstats.QuerySamples { var q promql.Query var err error - opts := promql.NewPrometheusQueryOpts(true, 0) + opts := promql.NewPrometheusQueryOpts(true, 0, model.LegacyValidation) if interval == 0 { // Instant query @@ -3888,7 +3889,7 @@ func TestQueryStatementLookbackDelta(t *testing.T) { require.NoError(t, err) t.Run("lookback delta not set in query options", func(t *testing.T) { - queryOpts := promql.NewPrometheusQueryOpts(false, 0) + queryOpts := promql.NewPrometheusQueryOpts(false, 0, model.LegacyValidation) runTest(t, engine, queryOpts, defaultLookbackDelta) }) @@ -3897,7 +3898,7 @@ func TestQueryStatementLookbackDelta(t *testing.T) { }) t.Run("lookback delta set in query options", func(t *testing.T) { - queryOpts := promql.NewPrometheusQueryOpts(false, 14*time.Minute) + queryOpts := promql.NewPrometheusQueryOpts(false, 14*time.Minute, model.LegacyValidation) runTest(t, engine, queryOpts, 14*time.Minute) }) }) @@ -3909,7 +3910,7 @@ func TestQueryStatementLookbackDelta(t *testing.T) { require.NoError(t, err) t.Run("lookback delta not set in query options", func(t *testing.T) { - queryOpts := promql.NewPrometheusQueryOpts(false, 0) + queryOpts := promql.NewPrometheusQueryOpts(false, 0, model.LegacyValidation) runTest(t, engine, queryOpts, 12*time.Minute) }) @@ -3918,7 +3919,7 @@ func TestQueryStatementLookbackDelta(t *testing.T) { }) t.Run("lookback delta set in query options", func(t *testing.T) { - queryOpts := promql.NewPrometheusQueryOpts(false, 14*time.Minute) + queryOpts := promql.NewPrometheusQueryOpts(false, 14*time.Minute, model.LegacyValidation) runTest(t, engine, queryOpts, 
14*time.Minute) }) }) diff --git a/pkg/streamingpromql/operators/aggregations/aggregation_test.go b/pkg/streamingpromql/operators/aggregations/aggregation_test.go index f26cb689b28..882f7306f61 100644 --- a/pkg/streamingpromql/operators/aggregations/aggregation_test.go +++ b/pkg/streamingpromql/operators/aggregations/aggregation_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" @@ -337,7 +338,16 @@ func TestAggregations_ReturnIncompleteGroupsOnEarlyClose(t *testing.T) { "count_values": { createOperator: func(inner types.InstantVectorOperator, queryTimeRange types.QueryTimeRange, memoryConsumptionTracker *limiter.MemoryConsumptionTracker) (types.InstantVectorOperator, error) { labelName := operators.NewStringLiteral("value", posrange.PositionRange{}) - return NewCountValues(inner, labelName, queryTimeRange, []string{"group"}, false, memoryConsumptionTracker, posrange.PositionRange{}), nil + return NewCountValues( + inner, + labelName, + queryTimeRange, + []string{"group"}, + false, + memoryConsumptionTracker, + posrange.PositionRange{}, + model.LegacyValidation, + ), nil }, instant: true, allowExpectedSeriesInAnyOrder: true, diff --git a/pkg/streamingpromql/operators/aggregations/count_values.go b/pkg/streamingpromql/operators/aggregations/count_values.go index 65e85bafb0f..a782798d7fe 100644 --- a/pkg/streamingpromql/operators/aggregations/count_values.go +++ b/pkg/streamingpromql/operators/aggregations/count_values.go @@ -39,6 +39,8 @@ type CountValues struct { labelsBuilder *labels.Builder labelsBytesBuffer []byte valueBuffer []byte + + nameValidationScheme model.ValidationScheme } var _ types.InstantVectorOperator = &CountValues{} @@ -51,6 +53,7 @@ func NewCountValues( without bool, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, expressionPosition 
posrange.PositionRange, + nameValidationScheme model.ValidationScheme, ) *CountValues { if without { grouping = append(grouping, labels.MetricName) @@ -66,6 +69,7 @@ func NewCountValues( Without: without, MemoryConsumptionTracker: memoryConsumptionTracker, expressionPosition: expressionPosition, + nameValidationScheme: nameValidationScheme, } } @@ -154,7 +158,7 @@ func (c *CountValues) SeriesMetadata(ctx context.Context) ([]types.SeriesMetadat func (c *CountValues) loadLabelName() error { c.resolvedLabelName = c.LabelName.GetValue() - if !model.LabelName(c.resolvedLabelName).IsValid() { + if !model.LabelName(c.resolvedLabelName).IsValid(c.nameValidationScheme) { return fmt.Errorf("invalid label name %q", c.resolvedLabelName) } diff --git a/pkg/streamingpromql/operators/aggregations/count_values_test.go b/pkg/streamingpromql/operators/aggregations/count_values_test.go index b6b13225e79..959b5d7564c 100644 --- a/pkg/streamingpromql/operators/aggregations/count_values_test.go +++ b/pkg/streamingpromql/operators/aggregations/count_values_test.go @@ -6,6 +6,7 @@ import ( "context" "testing" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql" @@ -49,6 +50,11 @@ func TestCountValues_GroupLabelling(t *testing.T) { inputSeries: labels.FromStrings(labels.MetricName, "my_metric", "env", "prod", "foo", "bar"), expectedOutputSeries: labels.FromStrings("env", "prod", "value", "123"), }, + "grouping with 'by', single utf8 grouping label, input does have grouping label": { + grouping: []string{"env😀"}, + inputSeries: labels.FromStrings(labels.MetricName, "my_metric", "env😀", "prod", "foo", "bar"), + expectedOutputSeries: labels.FromStrings("env😀", "prod", "value", "123"), + }, "grouping with 'by', multiple grouping labels, input has only metric name": { grouping: []string{"cluster", "env"}, inputSeries: labels.FromStrings(labels.MetricName, "my_metric"), 
@@ -109,6 +115,12 @@ func TestCountValues_GroupLabelling(t *testing.T) { inputSeries: labels.FromStrings(labels.MetricName, "my_metric", "env", "prod", "a-label", "a-value", "f-label", "f-value"), expectedOutputSeries: labels.FromStrings("a-label", "a-value", "f-label", "f-value", "value", "123"), }, + "grouping with 'without', single utf8 grouping label, input does have grouping label": { + grouping: []string{"env😀"}, + without: true, + inputSeries: labels.FromStrings(labels.MetricName, "my_metric", "env😀", "prod", "a-label", "a-value", "f-label", "f-value"), + expectedOutputSeries: labels.FromStrings("a-label", "a-value", "f-label", "f-value", "value", "123"), + }, "grouping with 'without', multiple grouping labels, input has only metric name": { grouping: []string{"cluster", "env"}, without: true, @@ -222,7 +234,16 @@ func TestCountValues_GroupLabelling(t *testing.T) { } labelName := operators.NewStringLiteral("value", posrange.PositionRange{}) - aggregator := NewCountValues(inner, labelName, types.NewInstantQueryTimeRange(timestamp.Time(0)), testCase.grouping, testCase.without, memoryConsumptionTracker, posrange.PositionRange{}) + aggregator := NewCountValues( + inner, + labelName, + types.NewInstantQueryTimeRange(timestamp.Time(0)), + testCase.grouping, + testCase.without, + memoryConsumptionTracker, + posrange.PositionRange{}, + model.UTF8Validation, + ) metadata, err := aggregator.SeriesMetadata(context.Background()) require.NoError(t, err) diff --git a/pkg/streamingpromql/operators/functions/factories.go b/pkg/streamingpromql/operators/functions/factories.go index f595ccdb112..04b89bb114c 100644 --- a/pkg/streamingpromql/operators/functions/factories.go +++ b/pkg/streamingpromql/operators/functions/factories.go @@ -6,6 +6,7 @@ import ( "fmt" "math" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser/posrange" @@ -25,6 +26,7 @@ 
type FunctionOperatorFactory func( annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, + validationScheme model.ValidationScheme, ) (types.Operator, error) // SingleInputVectorFunctionOperatorFactory creates an InstantVectorFunctionOperatorFactory for functions @@ -34,7 +36,7 @@ type FunctionOperatorFactory func( // - name: The name of the function // - f: The function implementation func SingleInputVectorFunctionOperatorFactory(name string, f FunctionOverInstantVectorDefinition) FunctionOperatorFactory { - return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { + return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { if len(args) != 1 { // Should be caught by the PromQL parser, but we check here for safety. 
return nil, fmt.Errorf("expected exactly 1 argument for %s, got %v", name, len(args)) @@ -77,7 +79,7 @@ func TimeTransformationFunctionOperatorFactory(name string, seriesDataFunc Insta SeriesMetadataFunction: DropSeriesName, } - return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { + return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { var inner types.InstantVectorOperator if len(args) == 0 { // if the argument is not provided, it will default to vector(time()) @@ -131,7 +133,7 @@ func FunctionOverRangeVectorOperatorFactory( name string, f FunctionOverRangeVectorDefinition, ) FunctionOperatorFactory { - return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { + return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { if len(args) != 1 { // Should be caught by the PromQL parser, but we check here for safety. 
return nil, fmt.Errorf("expected exactly 1 argument for %s, got %v", name, len(args)) @@ -153,7 +155,7 @@ func FunctionOverRangeVectorOperatorFactory( } } -func PredictLinearFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { +func PredictLinearFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { f := PredictLinear if len(args) != 2 { @@ -182,7 +184,7 @@ func PredictLinearFactory(args []types.Operator, _ labels.Labels, memoryConsumpt return o, nil } -func QuantileOverTimeFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { +func QuantileOverTimeFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { f := QuantileOverTime if len(args) != 2 { @@ -211,7 +213,7 @@ func QuantileOverTimeFactory(args []types.Operator, _ labels.Labels, memoryConsu return o, nil } -func scalarToInstantVectorOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, _ types.QueryTimeRange) (types.Operator, error) { +func scalarToInstantVectorOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, 
_ *annotations.Annotations, expressionPosition posrange.PositionRange, _ types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { if len(args) != 1 { // Should be caught by the PromQL parser, but we check here for safety. return nil, fmt.Errorf("expected exactly 1 argument for vector, got %v", len(args)) @@ -226,7 +228,7 @@ func scalarToInstantVectorOperatorFactory(args []types.Operator, _ labels.Labels return scalars.NewScalarToInstantVector(inner, expressionPosition, memoryConsumptionTracker), nil } -func LabelJoinFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { +func LabelJoinFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, nameValidationScheme model.ValidationScheme) (types.Operator, error) { // It is valid for label_join to have no source label names. ie, only 3 arguments are actually required. if len(args) < 3 { // Should be caught by the PromQL parser, but we check here for safety. 
@@ -264,7 +266,7 @@ func LabelJoinFunctionOperatorFactory(args []types.Operator, _ labels.Labels, me f := FunctionOverInstantVectorDefinition{ SeriesDataFunc: PassthroughData, SeriesMetadataFunction: SeriesMetadataFunctionDefinition{ - Func: LabelJoinFactory(dstLabel, separator, srcLabels), + Func: LabelJoinFactory(dstLabel, separator, srcLabels, nameValidationScheme), NeedsSeriesDeduplication: true, }, } @@ -274,7 +276,7 @@ func LabelJoinFunctionOperatorFactory(args []types.Operator, _ labels.Labels, me return operators.NewDeduplicateAndMerge(o, memoryConsumptionTracker), nil } -func LabelReplaceFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { +func LabelReplaceFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, validationScheme model.ValidationScheme) (types.Operator, error) { if len(args) != 5 { // Should be caught by the PromQL parser, but we check here for safety. 
return nil, fmt.Errorf("expected exactly 5 arguments for label_replace, got %v", len(args)) @@ -313,7 +315,7 @@ func LabelReplaceFunctionOperatorFactory(args []types.Operator, _ labels.Labels, f := FunctionOverInstantVectorDefinition{ SeriesDataFunc: PassthroughData, SeriesMetadataFunction: SeriesMetadataFunctionDefinition{ - Func: LabelReplaceFactory(dstLabel, replacement, srcLabel, regex), + Func: LabelReplaceFactory(dstLabel, replacement, srcLabel, regex, validationScheme), NeedsSeriesDeduplication: true, }, } @@ -323,7 +325,7 @@ func LabelReplaceFunctionOperatorFactory(args []types.Operator, _ labels.Labels, return operators.NewDeduplicateAndMerge(o, memoryConsumptionTracker), nil } -func AbsentOperatorFactory(args []types.Operator, labels labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { +func AbsentOperatorFactory(args []types.Operator, labels labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { if len(args) != 1 { return nil, fmt.Errorf("expected exactly 1 parameter for 'absent', got %v", len(args)) } @@ -336,7 +338,7 @@ func AbsentOperatorFactory(args []types.Operator, labels labels.Labels, memoryCo return NewAbsent(inner, labels, timeRange, memoryConsumptionTracker, expressionPosition), nil } -func AbsentOverTimeOperatorFactory(args []types.Operator, labels labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { +func AbsentOverTimeOperatorFactory(args []types.Operator, labels labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, 
expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { if len(args) != 1 { return nil, fmt.Errorf("expected exactly 1 parameter for 'absent_over_time', got %v", len(args)) } @@ -349,7 +351,7 @@ func AbsentOverTimeOperatorFactory(args []types.Operator, labels labels.Labels, return NewAbsentOverTime(inner, labels, timeRange, memoryConsumptionTracker, expressionPosition), nil } -func ClampFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { +func ClampFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { if len(args) != 3 { // Should be caught by the PromQL parser, but we check here for safety. return nil, fmt.Errorf("expected exactly 3 arguments for clamp, got %v", len(args)) @@ -383,7 +385,7 @@ func ClampFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memory } func ClampMinMaxFunctionOperatorFactory(functionName string, isMin bool) FunctionOperatorFactory { - return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { + return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { if len(args) != 2 { // Should be caught by the PromQL parser, but we check here for safety. 
return nil, fmt.Errorf("expected exactly 2 arguments for %s, got %v", functionName, len(args)) @@ -411,7 +413,7 @@ func ClampMinMaxFunctionOperatorFactory(functionName string, isMin bool) Functio } } -func RoundFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { +func RoundFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { if len(args) != 1 && len(args) != 2 { // Should be caught by the PromQL parser, but we check here for safety. return nil, fmt.Errorf("expected 1 or 2 arguments for round, got %v", len(args)) @@ -443,7 +445,7 @@ func RoundFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memory return operators.NewDeduplicateAndMerge(o, memoryConsumptionTracker), nil } -func HistogramQuantileFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { +func HistogramQuantileFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { if len(args) != 2 { // Should be caught by the PromQL parser, but we check here for safety. 
return nil, fmt.Errorf("expected exactly 2 arguments for histogram_quantile, got %v", len(args)) @@ -465,7 +467,7 @@ func HistogramQuantileFunctionOperatorFactory(args []types.Operator, _ labels.La return operators.NewDeduplicateAndMerge(o, memoryConsumptionTracker), nil } -func HistogramFractionFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { +func HistogramFractionFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { if len(args) != 3 { // Should be caught by the PromQL parser, but we check here for safety. return nil, fmt.Errorf("expected exactly 3 arguments for histogram_fraction, got %v", len(args)) @@ -493,7 +495,7 @@ func HistogramFractionFunctionOperatorFactory(args []types.Operator, _ labels.La return operators.NewDeduplicateAndMerge(o, memoryConsumptionTracker), nil } -func TimestampFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { +func TimestampFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { if len(args) != 1 { // Should be caught by the PromQL parser, but we check here for safety. 
return nil, fmt.Errorf("expected exactly 1 argument for timestamp, got %v", len(args)) @@ -523,7 +525,7 @@ func SortByLabelOperatorFactory(descending bool) FunctionOperatorFactory { functionName = "sort_by_label_desc" } - return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { + return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { if len(args) < 1 { // Should be caught by the PromQL parser, but we check here for safety. return nil, fmt.Errorf("expected at least 1 argument for %s, got %v", functionName, len(args)) @@ -564,7 +566,7 @@ func SortOperatorFactory(descending bool) FunctionOperatorFactory { functionName = "sort_desc" } - return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { + return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { if len(args) != 1 { // Should be caught by the PromQL parser, but we check here for safety. 
return nil, fmt.Errorf("expected exactly 1 argument for %s, got %v", functionName, len(args)) @@ -617,7 +619,7 @@ func RegisterFunction(function Function, name string, returnType parser.ValueTyp return nil } -func piOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { +func piOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { if len(args) != 0 { // Should be caught by the PromQL parser, but we check here for safety. return nil, fmt.Errorf("expected exactly 0 arguments for pi, got %v", len(args)) @@ -626,7 +628,7 @@ func piOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumption return scalars.NewScalarConstant(math.Pi, timeRange, memoryConsumptionTracker, expressionPosition), nil } -func timeOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { +func timeOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { if len(args) != 0 { // Should be caught by the PromQL parser, but we check here for safety. 
return nil, fmt.Errorf("expected exactly 0 arguments for time, got %v", len(args)) @@ -635,7 +637,7 @@ func timeOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumpti return operators.NewTime(timeRange, memoryConsumptionTracker, expressionPosition), nil } -func instantVectorToScalarOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { +func instantVectorToScalarOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { if len(args) != 1 { // Should be caught by the PromQL parser, but we check here for safety. return nil, fmt.Errorf("expected exactly 1 argument for scalar, got %v", len(args)) @@ -660,7 +662,7 @@ func UnaryNegationOfInstantVectorOperatorFactory(inner types.InstantVectorOperat return operators.NewDeduplicateAndMerge(o, memoryConsumptionTracker) } -func DoubleExponentialSmoothingFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { +func DoubleExponentialSmoothingFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { f := DoubleExponentialSmoothing functionName := "double_exponential_smoothing" diff --git a/pkg/streamingpromql/operators/functions/label.go b/pkg/streamingpromql/operators/functions/label.go 
index 1ec655a1ab2..dd378445258 100644 --- a/pkg/streamingpromql/operators/functions/label.go +++ b/pkg/streamingpromql/operators/functions/label.go @@ -18,17 +18,17 @@ import ( "github.com/grafana/mimir/pkg/util/limiter" ) -func LabelJoinFactory(dstLabelOp, separatorOp types.StringOperator, srcLabelOps []types.StringOperator) SeriesMetadataFunction { +func LabelJoinFactory(dstLabelOp, separatorOp types.StringOperator, srcLabelOps []types.StringOperator, validationScheme model.ValidationScheme) SeriesMetadataFunction { return func(seriesMetadata []types.SeriesMetadata, tracker *limiter.MemoryConsumptionTracker) ([]types.SeriesMetadata, error) { dst := dstLabelOp.GetValue() - if !model.LabelName(dst).IsValid() { + if !model.LabelName(dst).IsValid(validationScheme) { return nil, fmt.Errorf("invalid destination label name in label_join(): %s", dst) } separator := separatorOp.GetValue() srcLabels := make([]string, len(srcLabelOps)) for i, op := range srcLabelOps { src := op.GetValue() - if !model.LabelName(src).IsValid() { + if !model.LabelName(src).IsValid(validationScheme) { return nil, fmt.Errorf("invalid source label name in label_join(): %s", dst) } srcLabels[i] = src @@ -63,7 +63,7 @@ func LabelJoinFactory(dstLabelOp, separatorOp types.StringOperator, srcLabelOps } } -func LabelReplaceFactory(dstLabelOp, replacementOp, srcLabelOp, regexOp types.StringOperator) SeriesMetadataFunction { +func LabelReplaceFactory(dstLabelOp, replacementOp, srcLabelOp, regexOp types.StringOperator, validationScheme model.ValidationScheme) SeriesMetadataFunction { return func(seriesMetadata []types.SeriesMetadata, tracker *limiter.MemoryConsumptionTracker) ([]types.SeriesMetadata, error) { regexStr := regexOp.GetValue() regex, err := regexp.Compile("^(?s:" + regexStr + ")$") @@ -71,7 +71,7 @@ func LabelReplaceFactory(dstLabelOp, replacementOp, srcLabelOp, regexOp types.St return nil, fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr) } dst := 
dstLabelOp.GetValue() - if !model.LabelName(dst).IsValid() { + if !model.LabelName(dst).IsValid(validationScheme) { return nil, fmt.Errorf("invalid destination label name in label_replace(): %s", dst) } repl := replacementOp.GetValue() diff --git a/pkg/streamingpromql/planning.go b/pkg/streamingpromql/planning.go index 0300217a469..e7390b6f011 100644 --- a/pkg/streamingpromql/planning.go +++ b/pkg/streamingpromql/planning.go @@ -126,14 +126,13 @@ func (p *QueryPlanner) NewQueryPlan(ctx context.Context, qs string, timeRange ty } expr, err = p.runASTStage("Pre-processing", observer, func() (parser.Expr, error) { - step := time.Duration(timeRange.IntervalMilliseconds) * time.Millisecond - + start, end := timestamp.Time(timeRange.StartT), timestamp.Time(timeRange.EndT) + interval := time.Duration(timeRange.IntervalMilliseconds) * time.Millisecond if timeRange.IsInstant { - // timeRange.IntervalMilliseconds is 1 for instant queries, but we need to pass 0 for instant queries to PreprocessExpr. - step = 0 + // Prometheus expects interval to be zero for instant queries but we use 1. + interval = 0 } - - return promql.PreprocessExpr(expr, timestamp.Time(timeRange.StartT), timestamp.Time(timeRange.EndT), step) + return promql.PreprocessExpr(expr, start, end, interval) }) if err != nil { @@ -441,7 +440,7 @@ func findFunction(name string) (functions.Function, bool) { // Materialize converts a query plan into an executable query. 
func (e *Engine) Materialize(ctx context.Context, plan *planning.QueryPlan, queryable storage.Queryable, opts promql.QueryOpts) (promql.Query, error) { if opts == nil { - opts = promql.NewPrometheusQueryOpts(false, 0) + opts = promql.NewPrometheusQueryOpts(false, 0, model.UTF8Validation) } queryID, err := e.activeQueryTracker.Insert(ctx, plan.OriginalExpression+" # (materialization)") @@ -467,6 +466,7 @@ func (e *Engine) Materialize(ctx context.Context, plan *planning.QueryPlan, quer Annotations: q.annotations, LookbackDelta: q.lookbackDelta, EagerLoadSelectors: q.engine.eagerLoadSelectors, + NameValidationScheme: q.nameValidationScheme, } q.statement = &parser.EvalStmt{ diff --git a/pkg/streamingpromql/planning/core/aggregate_expression.go b/pkg/streamingpromql/planning/core/aggregate_expression.go index d51cdc375d0..bd67756c425 100644 --- a/pkg/streamingpromql/planning/core/aggregate_expression.go +++ b/pkg/streamingpromql/planning/core/aggregate_expression.go @@ -151,7 +151,16 @@ func (a *AggregateExpression) OperatorFactory(children []types.Operator, timeRan return nil, fmt.Errorf("expected StringOperator as parameter child of AggregateExpression with operation %s, got %T", a.Op.String(), children[0]) } - o = aggregations.NewCountValues(inner, param, timeRange, a.Grouping, a.Without, params.MemoryConsumptionTracker, a.ExpressionPosition.ToPrometheusType()) + o = aggregations.NewCountValues( + inner, + param, + timeRange, + a.Grouping, + a.Without, + params.MemoryConsumptionTracker, + a.ExpressionPosition.ToPrometheusType(), + params.NameValidationScheme, + ) default: if len(children) != 1 { diff --git a/pkg/streamingpromql/planning/core/function_call.go b/pkg/streamingpromql/planning/core/function_call.go index d94bfd6bb96..382f8131a7d 100644 --- a/pkg/streamingpromql/planning/core/function_call.go +++ b/pkg/streamingpromql/planning/core/function_call.go @@ -94,7 +94,7 @@ func (f *FunctionCall) OperatorFactory(children []types.Operator, timeRange type 
absentLabels = mimirpb.FromLabelAdaptersToLabels(f.AbsentLabels) } - o, err := fnc.OperatorFactory(children, absentLabels, params.MemoryConsumptionTracker, params.Annotations, f.ExpressionPosition.ToPrometheusType(), timeRange) + o, err := fnc.OperatorFactory(children, absentLabels, params.MemoryConsumptionTracker, params.Annotations, f.ExpressionPosition.ToPrometheusType(), timeRange, params.NameValidationScheme) if err != nil { return nil, err } diff --git a/pkg/streamingpromql/planning/plan.go b/pkg/streamingpromql/planning/plan.go index 8589dfd0def..934321f2f41 100644 --- a/pkg/streamingpromql/planning/plan.go +++ b/pkg/streamingpromql/planning/plan.go @@ -9,6 +9,7 @@ import ( "time" "github.com/gogo/protobuf/proto" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" @@ -92,6 +93,7 @@ type OperatorParameters struct { Annotations *annotations.Annotations LookbackDelta time.Duration EagerLoadSelectors bool + NameValidationScheme model.ValidationScheme } func (p *QueryPlan) ToEncodedPlan(includeDescriptions bool, includeDetails bool) (*EncodedQueryPlan, error) { diff --git a/pkg/streamingpromql/query.go b/pkg/streamingpromql/query.go index 39860573499..01c707ea0ac 100644 --- a/pkg/streamingpromql/query.go +++ b/pkg/streamingpromql/query.go @@ -14,6 +14,7 @@ import ( "github.com/go-kit/log/level" "github.com/grafana/dskit/cancellation" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" @@ -46,6 +47,7 @@ type Query struct { annotations *annotations.Annotations stats *types.QueryStats lookbackDelta time.Duration + nameValidationScheme model.ValidationScheme // Time range of the top-level query. // Subqueries may use a different range. 
@@ -59,7 +61,7 @@ type Query struct { func (e *Engine) newQuery(ctx context.Context, queryable storage.Queryable, opts promql.QueryOpts, timeRange types.QueryTimeRange, originalExpression string) (*Query, error) { if opts == nil { - opts = promql.NewPrometheusQueryOpts(false, 0) + opts = promql.NewPrometheusQueryOpts(false, 0, model.UTF8Validation) } lookbackDelta := opts.LookbackDelta() @@ -86,6 +88,7 @@ func (e *Engine) newQuery(ctx context.Context, queryable storage.Queryable, opts topLevelQueryTimeRange: timeRange, lookbackDelta: lookbackDelta, originalExpression: originalExpression, + nameValidationScheme: opts.ValidationScheme(), } return q, nil diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go index 3df835d0351..a6c050c75d5 100644 --- a/pkg/util/validation/limits.go +++ b/pkg/util/validation/limits.go @@ -1412,6 +1412,12 @@ func (o *Overrides) LabelsQueryOptimizerEnabled(userID string) bool { return o.getOverridesForUser(userID).LabelsQueryOptimizerEnabled } +// ValidationScheme returns the validation scheme to use for a particular tenant. +func (o *Overrides) ValidationScheme(_ string) model.ValidationScheme { + // TODO(juliusmh): make this configurable by tenant + return model.LegacyValidation +} + // CardinalityAnalysisMaxResults returns the maximum number of results that // can be returned in a single cardinality API request. 
func (o *Overrides) CardinalityAnalysisMaxResults(userID string) int { diff --git a/tools/benchmark-query-engine/main.go b/tools/benchmark-query-engine/main.go index bce00cf0eca..581c8fe44e8 100644 --- a/tools/benchmark-query-engine/main.go +++ b/tools/benchmark-query-engine/main.go @@ -203,7 +203,7 @@ func (a *app) buildBinary() error { a.binaryPath = filepath.Join(a.tempDir, "benchmark-binary") - cmd := exec.Command("go", "test", "-c", "-o", a.binaryPath, "-tags", "stringlabels", ".") + cmd := exec.Command("go", "test", "-c", "-o", a.binaryPath, "-tags", "stringlabels,localvalidationscheme", ".") cmd.Dir = a.benchmarkPackageDir cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr diff --git a/vendor/github.com/prometheus/alertmanager/config/notifiers.go b/vendor/github.com/prometheus/alertmanager/config/notifiers.go index 16b7a33f308..97ce8692da6 100644 --- a/vendor/github.com/prometheus/alertmanager/config/notifiers.go +++ b/vendor/github.com/prometheus/alertmanager/config/notifiers.go @@ -514,7 +514,7 @@ type WebhookConfig struct { // Timeout is the maximum time allowed to invoke the webhook. Setting this to 0 // does not impose a timeout. - Timeout model.Duration `yaml:"timeout" json:"timeout"` + Timeout time.Duration `yaml:"timeout" json:"timeout"` } // UnmarshalYAML implements the yaml.Unmarshaler interface. diff --git a/vendor/github.com/prometheus/alertmanager/matchers/compat/parse.go b/vendor/github.com/prometheus/alertmanager/matchers/compat/parse.go index 0c0dfffb1fd..951310268c9 100644 --- a/vendor/github.com/prometheus/alertmanager/matchers/compat/parse.go +++ b/vendor/github.com/prometheus/alertmanager/matchers/compat/parse.go @@ -190,7 +190,7 @@ func FallbackMatchersParser(l log.Logger) ParseMatchers { // isValidClassicLabelName returns true if the string is a valid classic label name. 
func isValidClassicLabelName(_ log.Logger) func(model.LabelName) bool { return func(name model.LabelName) bool { - return name.IsValid() + return name.IsValidLegacy() } } diff --git a/vendor/github.com/prometheus/alertmanager/notify/webhook/webhook.go b/vendor/github.com/prometheus/alertmanager/notify/webhook/webhook.go index 153c17f565d..eb4e01ba40d 100644 --- a/vendor/github.com/prometheus/alertmanager/notify/webhook/webhook.go +++ b/vendor/github.com/prometheus/alertmanager/notify/webhook/webhook.go @@ -21,7 +21,6 @@ import ( "net/http" "os" "strings" - "time" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -126,7 +125,7 @@ func (n *Notifier) Notify(ctx context.Context, alerts ...*types.Alert) (bool, er } if n.conf.Timeout > 0 { - postCtx, cancel := context.WithTimeoutCause(ctx, time.Duration(n.conf.Timeout), fmt.Errorf("configured webhook timeout reached (%s)", n.conf.Timeout)) + postCtx, cancel := context.WithTimeoutCause(ctx, n.conf.Timeout, fmt.Errorf("configured webhook timeout reached (%s)", n.conf.Timeout)) defer cancel() ctx = postCtx } diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 1448439b7f7..8e1b02f01af 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -14,7 +14,6 @@ package expfmt import ( - "bufio" "fmt" "io" "math" @@ -70,21 +69,6 @@ func ResponseFormat(h http.Header) Format { return FmtUnknown } -// NewDecoder returns a new decoder based on the given input format. -// If the input format does not imply otherwise, a text format decoder is returned. -func NewDecoder(r io.Reader, format Format) Decoder { - switch format.FormatType() { - case TypeProtoDelim: - return &protoDecoder{r: bufio.NewReader(r)} - } - return &textDecoder{r: r} -} - -// protoDecoder implements the Decoder interface for protocol buffers. 
-type protoDecoder struct { - r protodelim.Reader -} - // Decode implements the Decoder interface. func (d *protoDecoder) Decode(v *dto.MetricFamily) error { opts := protodelim.UnmarshalOptions{ @@ -93,7 +77,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { if err := opts.UnmarshalFrom(d.r, v); err != nil { return err } - if !model.IsValidMetricName(model.LabelValue(v.GetName())) { + if !d.isValidMetricName(v.GetName()) { return fmt.Errorf("invalid metric name %q", v.GetName()) } for _, m := range v.GetMetric() { @@ -107,7 +91,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { if !model.LabelValue(l.GetValue()).IsValid() { return fmt.Errorf("invalid label value %q", l.GetValue()) } - if !model.LabelName(l.GetName()).IsValid() { + if !d.isValidLabelName(l.GetName()) { return fmt.Errorf("invalid label name %q", l.GetName()) } } diff --git a/vendor/github.com/prometheus/common/expfmt/decode_globalvalidationscheme.go b/vendor/github.com/prometheus/common/expfmt/decode_globalvalidationscheme.go new file mode 100644 index 00000000000..ffb8b6ed8a5 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/decode_globalvalidationscheme.go @@ -0,0 +1,48 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !localvalidationscheme + +package expfmt + +import ( + "bufio" + "io" + + "google.golang.org/protobuf/encoding/protodelim" + + "github.com/prometheus/common/model" +) + +// protoDecoder implements the Decoder interface for protocol buffers. +type protoDecoder struct { + r protodelim.Reader +} + +// NewDecoder returns a new decoder based on the given input format. +// If the input format does not imply otherwise, a text format decoder is returned. +func NewDecoder(r io.Reader, format Format) Decoder { + switch format.FormatType() { + case TypeProtoDelim: + return &protoDecoder{r: bufio.NewReader(r)} + } + return &textDecoder{r: r} +} + +func (d *protoDecoder) isValidMetricName(name string) bool { + return model.IsValidMetricName(model.LabelValue(name)) +} + +func (d *protoDecoder) isValidLabelName(name string) bool { + return model.LabelName(name).IsValid() +} diff --git a/vendor/github.com/prometheus/common/expfmt/decode_localvalidationscheme.go b/vendor/github.com/prometheus/common/expfmt/decode_localvalidationscheme.go new file mode 100644 index 00000000000..8e0dbb59648 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/decode_localvalidationscheme.go @@ -0,0 +1,52 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build localvalidationscheme + +package expfmt + +import ( + "bufio" + "io" + + "google.golang.org/protobuf/encoding/protodelim" + + "github.com/prometheus/common/model" +) + +// protoDecoder implements the Decoder interface for protocol buffers. +type protoDecoder struct { + r protodelim.Reader + validationScheme model.ValidationScheme +} + +// NewDecoder returns a new decoder based on the given input format. +// If the input format does not imply otherwise, a text format decoder is returned. +func NewDecoder(r io.Reader, format Format, validationScheme model.ValidationScheme) Decoder { + switch format.FormatType() { + case TypeProtoDelim: + return &protoDecoder{ + r: bufio.NewReader(r), + validationScheme: validationScheme, + } + } + return &textDecoder{r: r} +} + +func (d *protoDecoder) isValidMetricName(name string) bool { + return model.IsValidMetricName(model.LabelValue(name), d.validationScheme) +} + +func (d *protoDecoder) isValidLabelName(name string) bool { + return model.LabelName(name).IsValid(d.validationScheme) +} diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index 460f554f294..2cb61932bad 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -88,20 +88,20 @@ func (a *Alert) StatusAt(ts time.Time) AlertStatus { } // Validate checks whether the alert data is inconsistent. 
-func (a *Alert) Validate() error { +func (a *Alert) validate(scheme ValidationScheme) error { if a.StartsAt.IsZero() { return errors.New("start time missing") } if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { return errors.New("start time must be before end time") } - if err := a.Labels.Validate(); err != nil { + if err := a.Labels.validate(scheme); err != nil { return fmt.Errorf("invalid label set: %w", err) } if len(a.Labels) == 0 { return errors.New("at least one label pair required") } - if err := a.Annotations.Validate(); err != nil { + if err := a.Annotations.validate(scheme); err != nil { return fmt.Errorf("invalid annotations: %w", err) } return nil diff --git a/vendor/github.com/prometheus/common/model/alert_globalvalidationscheme.go b/vendor/github.com/prometheus/common/model/alert_globalvalidationscheme.go new file mode 100644 index 00000000000..b71b84eb23d --- /dev/null +++ b/vendor/github.com/prometheus/common/model/alert_globalvalidationscheme.go @@ -0,0 +1,21 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !localvalidationscheme + +package model + +// Validate checks whether the alert data is inconsistent. 
+func (a *Alert) Validate() error { + return a.validate(NameValidationScheme) +} diff --git a/vendor/github.com/prometheus/common/model/alert_localvalidationscheme.go b/vendor/github.com/prometheus/common/model/alert_localvalidationscheme.go new file mode 100644 index 00000000000..eb3e7ff76ae --- /dev/null +++ b/vendor/github.com/prometheus/common/model/alert_localvalidationscheme.go @@ -0,0 +1,21 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build localvalidationscheme + +package model + +// Validate checks whether the alert data is inconsistent. +func (a *Alert) Validate(scheme ValidationScheme) error { + return a.validate(scheme) +} diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index e2ff835950d..112d61c9db7 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -19,6 +19,8 @@ import ( "regexp" "strings" "unicode/utf8" + + "gopkg.in/yaml.v2" ) const ( @@ -103,20 +105,17 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") // therewith. type LabelName string -// IsValid returns true iff the name matches the pattern of LabelNameRE when -// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if -// NameValidationScheme is set to UTF8Validation. 
-func (ln LabelName) IsValid() bool { +func (ln LabelName) isValid(scheme ValidationScheme) bool { if len(ln) == 0 { return false } - switch NameValidationScheme { + switch scheme { case LegacyValidation: return ln.IsValidLegacy() case UTF8Validation: return utf8.ValidString(string(ln)) default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) + panic(fmt.Sprintf("Invalid name validation scheme requested: %s", scheme)) } } @@ -136,31 +135,11 @@ func (ln LabelName) IsValidLegacy() bool { return true } -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - if !LabelName(s).IsValid() { - return fmt.Errorf("%q is not a valid label name", s) - } - *ln = LabelName(s) - return nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (ln *LabelName) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - if !LabelName(s).IsValid() { - return fmt.Errorf("%q is not a valid label name", s) - } - *ln = LabelName(s) - return nil -} +var ( + labelName LabelName + _ yaml.Unmarshaler = &labelName + _ json.Unmarshaler = &labelName +) // LabelNames is a sortable LabelName slice. In implements sort.Interface. type LabelNames []LabelName diff --git a/vendor/github.com/prometheus/common/model/labels_globalvalidationscheme.go b/vendor/github.com/prometheus/common/model/labels_globalvalidationscheme.go new file mode 100644 index 00000000000..0460a4d3206 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labels_globalvalidationscheme.go @@ -0,0 +1,53 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !localvalidationscheme + +package model + +import ( + "encoding/json" + "fmt" +) + +// IsValid returns true iff the name matches the pattern of LabelNameRE when +// scheme is LegacyValidation, or valid UTF-8 if it is UTF8Validation. +func (ln LabelName) IsValid() bool { + return ln.isValid(NameValidationScheme) +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + if !LabelName(s).IsValid() { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (ln *LabelName) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !LabelName(s).IsValid() { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} diff --git a/vendor/github.com/prometheus/common/model/labels_localvalidationscheme.go b/vendor/github.com/prometheus/common/model/labels_localvalidationscheme.go new file mode 100644 index 00000000000..9b7e4aab6d8 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labels_localvalidationscheme.go @@ -0,0 +1,55 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build localvalidationscheme + +package model + +import ( + "encoding/json" + "fmt" +) + +// IsValid returns true iff the name matches the pattern of LabelNameRE when +// scheme is LegacyValidation, or valid UTF-8 if it is UTF8Validation. +func (ln LabelName) IsValid(validationScheme ValidationScheme) bool { + return ln.isValid(validationScheme) +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +// Validation is done using UTF8Validation. +func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + if !LabelName(s).IsValid(UTF8Validation) { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// Validation is done using UTF8Validation. +func (ln *LabelName) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !LabelName(s).IsValid(UTF8Validation) { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go index d0ad88da334..f14d2d8a34d 100644 --- a/vendor/github.com/prometheus/common/model/labelset.go +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -26,11 +26,9 @@ import ( // match. 
type LabelSet map[LabelName]LabelValue -// Validate checks whether all names and values in the label set -// are valid. -func (ls LabelSet) Validate() error { +func (ls LabelSet) validate(scheme ValidationScheme) error { for ln, lv := range ls { - if !ln.IsValid() { + if !ln.isValid(scheme) { return fmt.Errorf("invalid name %q", ln) } if !lv.IsValid() { @@ -139,20 +137,7 @@ func (ls LabelSet) FastFingerprint() Fingerprint { return labelSetToFastFingerprint(ls) } -// UnmarshalJSON implements the json.Unmarshaler interface. -func (l *LabelSet) UnmarshalJSON(b []byte) error { - var m map[LabelName]LabelValue - if err := json.Unmarshal(b, &m); err != nil { - return err - } - // encoding/json only unmarshals maps of the form map[string]T. It treats - // LabelName as a string and does not call its UnmarshalJSON method. - // Thus, we have to replicate the behavior here. - for ln := range m { - if !ln.IsValid() { - return fmt.Errorf("%q is not a valid label name", ln) - } - } - *l = LabelSet(m) - return nil -} +var ( + labelSet LabelSet + _ json.Unmarshaler = &labelSet +) diff --git a/vendor/github.com/prometheus/common/model/labelset_globalvalidationscheme.go b/vendor/github.com/prometheus/common/model/labelset_globalvalidationscheme.go new file mode 100644 index 00000000000..f978a0d62b3 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labelset_globalvalidationscheme.go @@ -0,0 +1,45 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !localvalidationscheme + +package model + +import ( + "encoding/json" + "fmt" +) + +// Validate checks whether all names and values in the label set +// are valid. +func (ls LabelSet) Validate() error { + return ls.validate(NameValidationScheme) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (l *LabelSet) UnmarshalJSON(b []byte) error { + var m map[LabelName]LabelValue + if err := json.Unmarshal(b, &m); err != nil { + return err + } + // encoding/json only unmarshals maps of the form map[string]T. It treats + // LabelName as a string and does not call its UnmarshalJSON method. + // Thus, we have to replicate the behavior here. + for ln := range m { + if !ln.IsValid() { + return fmt.Errorf("%q is not a valid label name", ln) + } + } + *l = LabelSet(m) + return nil +} diff --git a/vendor/github.com/prometheus/common/model/labelset_localvalidationscheme.go b/vendor/github.com/prometheus/common/model/labelset_localvalidationscheme.go new file mode 100644 index 00000000000..5c4c282e4c9 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labelset_localvalidationscheme.go @@ -0,0 +1,46 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build localvalidationscheme + +package model + +import ( + "encoding/json" + "fmt" +) + +// Validate checks whether all names and values in the label set +// are valid. +func (ls LabelSet) Validate(scheme ValidationScheme) error { + return ls.validate(scheme) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// Validates label names using UTF8Validation. +func (l *LabelSet) UnmarshalJSON(b []byte) error { + var m map[LabelName]LabelValue + if err := json.Unmarshal(b, &m); err != nil { + return err + } + // encoding/json only unmarshals maps of the form map[string]T. It treats + // LabelName as a string and does not call its UnmarshalJSON method. + // Thus, we have to replicate the behavior here. + for ln := range m { + if !ln.IsValid(UTF8Validation) { + return fmt.Errorf("%q is not a valid label name", ln) + } + } + *l = LabelSet(m) + return nil +} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index 2bd913fff21..abeb31cb66c 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -27,36 +27,13 @@ import ( "gopkg.in/yaml.v2" ) -var ( - // NameValidationScheme determines the global default method of the name - // validation to be used by all calls to IsValidMetricName() and LabelName - // IsValid(). - // - // Deprecated: This variable should not be used and might be removed in the - // far future. If you wish to stick to the legacy name validation use - // `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods - // instead. This variable is here as an escape hatch for emergency cases, - // given the recent change from `LegacyValidation` to `UTF8Validation`, e.g., - // to delay UTF-8 migrations in time or aid in debugging unforeseen results of - // the change. 
In such a case, a temporary assignment to `LegacyValidation` - // value in the `init()` function in your main.go or so, could be considered. - // - // Historically we opted for a global variable for feature gating different - // validation schemes in operations that were not otherwise easily adjustable - // (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate - // Labels structure or package might have been a better choice. Given the - // change was made and many upgraded the common already, we live this as-is - // with this warning and learning for the future. - NameValidationScheme = UTF8Validation - - // NameEscapingScheme defines the default way that names will be escaped when - // presented to systems that do not support UTF-8 names. If the Content-Type - // "escaping" term is specified, that will override this value. - // NameEscapingScheme should not be set to the NoEscaping value. That string - // is used in content negotiation to indicate that a system supports UTF-8 and - // has that feature enabled. - NameEscapingScheme = UnderscoreEscaping -) +// NameEscapingScheme defines the default way that names will be escaped when +// presented to systems that do not support UTF-8 names. If the Content-Type +// "escaping" term is specified, that will override this value. +// NameEscapingScheme should not be set to the NoEscaping value. That string +// is used in content negotiation to indicate that a system supports UTF-8 and +// has that feature enabled. +var NameEscapingScheme = UnderscoreEscaping // ValidationScheme is a Go enum for determining how metric and label names will // be validated by this library. @@ -227,11 +204,8 @@ func (m Metric) FastFingerprint() Fingerprint { return LabelSet(m).FastFingerprint() } -// IsValidMetricName returns true iff name matches the pattern of MetricNameRE -// for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is -// selected. 
-func IsValidMetricName(n LabelValue) bool { - switch NameValidationScheme { +func isValidMetricName(n LabelValue, scheme ValidationScheme) bool { + switch scheme { case LegacyValidation: return IsValidLegacyMetricName(string(n)) case UTF8Validation: @@ -240,12 +214,12 @@ func IsValidMetricName(n LabelValue) bool { } return utf8.ValidString(string(n)) default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %s", NameValidationScheme.String())) + panic(fmt.Sprintf("Invalid name validation scheme requested: %s", scheme)) } } // IsValidLegacyMetricName is similar to IsValidMetricName but always uses the -// legacy validation scheme regardless of the value of NameValidationScheme. +// legacy validation scheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. func IsValidLegacyMetricName(n string) bool { diff --git a/vendor/github.com/prometheus/common/model/metric_globalvalidationscheme.go b/vendor/github.com/prometheus/common/model/metric_globalvalidationscheme.go new file mode 100644 index 00000000000..ff038d88c83 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metric_globalvalidationscheme.go @@ -0,0 +1,43 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !localvalidationscheme + +package model + +// NameValidationScheme determines the global default method of the name +// validation to be used by all calls to IsValidMetricName() and LabelName +// IsValid(). +// +// Deprecated: This variable should not be used and might be removed in the +// far future. If you wish to stick to the legacy name validation use +// `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods +// instead. This variable is here as an escape hatch for emergency cases, +// given the recent change from `LegacyValidation` to `UTF8Validation`, e.g., +// to delay UTF-8 migrations in time or aid in debugging unforeseen results of +// the change. In such a case, a temporary assignment to `LegacyValidation` +// value in the `init()` function in your main.go or so, could be considered. +// +// Historically we opted for a global variable for feature gating different +// validation schemes in operations that were not otherwise easily adjustable +// (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate +// Labels structure or package might have been a better choice. Given the +// change was made and many upgraded the common already, we live this as-is +// with this warning and learning for the future. +var NameValidationScheme = UTF8Validation + +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE +// for legacy names, and iff it's valid UTF-8 if scheme is UTF8Validation. 
+func IsValidMetricName(n LabelValue) bool { + return isValidMetricName(n, NameValidationScheme) +} diff --git a/vendor/github.com/prometheus/common/model/metric_localvalidationscheme.go b/vendor/github.com/prometheus/common/model/metric_localvalidationscheme.go new file mode 100644 index 00000000000..1e7d0d74acb --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metric_localvalidationscheme.go @@ -0,0 +1,22 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build localvalidationscheme + +package model + +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE +// for legacy names, and iff it's valid UTF-8 if scheme is UTF8Validation. +func IsValidMetricName(n LabelValue, scheme ValidationScheme) bool { + return isValidMetricName(n, scheme) +} diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go index 8f91a9702e0..b5b2e21ac7b 100644 --- a/vendor/github.com/prometheus/common/model/silence.go +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -45,9 +45,8 @@ func (m *Matcher) UnmarshalJSON(b []byte) error { return nil } -// Validate returns true iff all fields of the matcher have valid values. 
-func (m *Matcher) Validate() error { - if !m.Name.IsValid() { +func (m *Matcher) validate(scheme ValidationScheme) error { + if !m.Name.isValid(scheme) { return fmt.Errorf("invalid name %q", m.Name) } if m.IsRegex { @@ -76,12 +75,12 @@ type Silence struct { } // Validate returns true iff all fields of the silence have valid values. -func (s *Silence) Validate() error { +func (s *Silence) validate(scheme ValidationScheme) error { if len(s.Matchers) == 0 { return errors.New("at least one matcher required") } for _, m := range s.Matchers { - if err := m.Validate(); err != nil { + if err := m.validate(scheme); err != nil { return fmt.Errorf("invalid matcher: %w", err) } } diff --git a/vendor/github.com/prometheus/common/model/silence_globalvalidationscheme.go b/vendor/github.com/prometheus/common/model/silence_globalvalidationscheme.go new file mode 100644 index 00000000000..fd6c230860f --- /dev/null +++ b/vendor/github.com/prometheus/common/model/silence_globalvalidationscheme.go @@ -0,0 +1,26 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !localvalidationscheme + +package model + +// Validate returns true iff all fields of the matcher have valid values. +func (m *Matcher) Validate() error { + return m.validate(NameValidationScheme) +} + +// Validate returns true iff all fields of the silence have valid values. 
+func (s *Silence) Validate() error { + return s.validate(NameValidationScheme) +} diff --git a/vendor/github.com/prometheus/common/model/silence_localvalidationscheme.go b/vendor/github.com/prometheus/common/model/silence_localvalidationscheme.go new file mode 100644 index 00000000000..ed2889e48d0 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/silence_localvalidationscheme.go @@ -0,0 +1,26 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build localvalidationscheme + +package model + +// Validate returns true iff all fields of the matcher have valid values. +func (m *Matcher) Validate(scheme ValidationScheme) error { + return m.validate(scheme) +} + +// Validate returns true iff all fields of the silence have valid values. 
+func (s *Silence) Validate(scheme ValidationScheme) error { + return s.validate(scheme) +} diff --git a/vendor/go.opentelemetry.io/contrib/bridges/prometheus/producer.go b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/producer.go index 37475c3d6df..f3838d17ba0 100644 --- a/vendor/go.opentelemetry.io/contrib/bridges/prometheus/producer.go +++ b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/producer.go @@ -13,6 +13,7 @@ import ( "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/model" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" @@ -42,7 +43,8 @@ type producer struct { func NewMetricProducer(opts ...Option) metric.Producer { cfg := newConfig(opts...) return &producer{ - gatherers: cfg.gatherers, + // TODO: Parameterize name validation scheme. + gatherers: prometheus.NewGatherers(cfg.gatherers, model.UTF8Validation), } } @@ -50,18 +52,18 @@ func (p *producer) Produce(context.Context) ([]metricdata.ScopeMetrics, error) { now := time.Now() var errs multierr otelMetrics := make([]metricdata.Metrics, 0) - for _, gatherer := range p.gatherers { + p.gatherers.Range(func(gatherer prometheus.Gatherer) { promMetrics, err := gatherer.Gather() if err != nil { errs = append(errs, err) - continue + return } m, err := convertPrometheusMetricsInto(promMetrics, now) otelMetrics = append(otelMetrics, m...) 
if err != nil { errs = append(errs, err) } - } + }) if errs.errOrNil() != nil { otel.Handle(errs.errOrNil()) } diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go index ceb2d63e2a9..b7b512cd203 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go @@ -25,6 +25,7 @@ type config struct { disableScopeInfo bool namespace string resourceAttributesFilter attribute.Filter + validationScheme model.ValidationScheme } var logDeprecatedLegacyScheme = sync.OnceFunc(func() { @@ -125,9 +126,8 @@ func WithoutCounterSuffixes() Option { }) } -// WithoutScopeInfo configures the Exporter to not export the otel_scope_info metric. -// If not specified, the Exporter will create a otel_scope_info metric containing -// the metrics' Instrumentation Scope, and also add labels about Instrumentation Scope to all metric points. +// WithoutScopeInfo configures the Exporter to not export +// labels about Instrumentation Scope to all metric points. func WithoutScopeInfo() Option { return optionFunc(func(cfg config) config { cfg.disableScopeInfo = true @@ -136,11 +136,11 @@ func WithoutScopeInfo() Option { } // WithNamespace configures the Exporter to prefix metric with the given namespace. -// Metadata metrics such as target_info and otel_scope_info are not prefixed since these +// Metadata metrics such as target_info are not prefixed since these // have special behavior based on their name. -func WithNamespace(ns string) Option { +func WithNamespace(ns string, validationScheme model.ValidationScheme) Option { return optionFunc(func(cfg config) config { - if model.NameValidationScheme != model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme. + if validationScheme != model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme. 
logDeprecatedLegacyScheme() // Only sanitize if prometheus does not support UTF-8. ns = model.EscapeName(ns, model.NameEscapingScheme) @@ -166,3 +166,12 @@ func WithResourceAsConstantLabels(resourceFilter attribute.Filter) Option { return cfg }) } + +// WithValidationScheme configures the Exporter to validate label and metric names +// according to this scheme. Defaults to UTF8Validation. +func WithValidationScheme(scheme model.ValidationScheme) Option { + return optionFunc(func(cfg config) config { + cfg.validationScheme = scheme + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go index e0959641caf..61482a3c0dd 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go @@ -21,7 +21,6 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" @@ -31,25 +30,20 @@ const ( targetInfoMetricName = "target_info" targetInfoDescription = "Target metadata" - scopeInfoMetricName = "otel_scope_info" - scopeInfoDescription = "Instrumentation Scope metadata" - - scopeNameLabel = "otel_scope_name" - scopeVersionLabel = "otel_scope_version" + scopeLabelPrefix = "otel_scope_" + scopeNameLabel = scopeLabelPrefix + "name" + scopeVersionLabel = scopeLabelPrefix + "version" + scopeSchemaLabel = scopeLabelPrefix + "schema_url" traceIDExemplarKey = "trace_id" spanIDExemplarKey = "span_id" ) -var ( - errScopeInvalid = errors.New("invalid scope") - - metricsPool = sync.Pool{ - New: func() interface{} { - return &metricdata.ResourceMetrics{} - }, - } -) +var metricsPool = sync.Pool{ + New: func() interface{} { + return &metricdata.ResourceMetrics{} + }, +} 
// Exporter is a Prometheus Exporter that embeds the OTel metric.Reader // interface for easy instantiation with a MeterProvider. @@ -97,10 +91,9 @@ type collector struct { mu sync.Mutex // mu protects all members below from the concurrent access. disableTargetInfo bool targetInfo prometheus.Metric - scopeInfos map[instrumentation.Scope]prometheus.Metric - scopeInfosInvalid map[instrumentation.Scope]struct{} metricFamilies map[string]*dto.MetricFamily resourceKeyVals keyVals + validationScheme model.ValidationScheme } // prometheus counters MUST have a _total suffix by default: @@ -122,11 +115,10 @@ func New(opts ...Option) (*Exporter, error) { withoutUnits: cfg.withoutUnits, withoutCounterSuffixes: cfg.withoutCounterSuffixes, disableScopeInfo: cfg.disableScopeInfo, - scopeInfos: make(map[instrumentation.Scope]prometheus.Metric), - scopeInfosInvalid: make(map[instrumentation.Scope]struct{}), metricFamilies: make(map[string]*dto.MetricFamily), namespace: cfg.namespace, resourceAttributesFilter: cfg.resourceAttributesFilter, + validationScheme: cfg.validationScheme, } if err := cfg.registerer.Register(collector); err != nil { @@ -174,7 +166,7 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { defer c.mu.Unlock() if c.targetInfo == nil && !c.disableTargetInfo { - targetInfo, err := createInfoMetric(targetInfoMetricName, targetInfoDescription, metrics.Resource) + targetInfo, err := createInfoMetric(targetInfoMetricName, targetInfoDescription, metrics.Resource, c.validationScheme) if err != nil { // If the target info metric is invalid, disable sending it. c.disableTargetInfo = true @@ -202,20 +194,15 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { } if !c.disableScopeInfo { - scopeInfo, err := c.scopeInfo(scopeMetrics.Scope) - if errors.Is(err, errScopeInvalid) { - // Do not report the same error multiple times. 
- continue - } - if err != nil { - otel.Handle(err) - continue - } + kv.keys = append(kv.keys, scopeNameLabel, scopeVersionLabel, scopeSchemaLabel) + kv.vals = append(kv.vals, scopeMetrics.Scope.Name, scopeMetrics.Scope.Version, scopeMetrics.Scope.SchemaURL) - ch <- scopeInfo - - kv.keys = append(kv.keys, scopeNameLabel, scopeVersionLabel) - kv.vals = append(kv.vals, scopeMetrics.Scope.Name, scopeMetrics.Scope.Version) + attrKeys, attrVals := getAttrs(scopeMetrics.Scope.Attributes, c.validationScheme) + for i := range attrKeys { + attrKeys[i] = scopeLabelPrefix + attrKeys[i] + } + kv.keys = append(kv.keys, attrKeys...) + kv.vals = append(kv.vals, attrVals...) } kv.keys = append(kv.keys, c.resourceKeyVals.keys...) @@ -239,57 +226,131 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { switch v := m.Data.(type) { case metricdata.Histogram[int64]: - addHistogramMetric(ch, v, m, name, kv) + addHistogramMetric(ch, v, m, name, kv, c.validationScheme) case metricdata.Histogram[float64]: - addHistogramMetric(ch, v, m, name, kv) + addHistogramMetric(ch, v, m, name, kv, c.validationScheme) case metricdata.ExponentialHistogram[int64]: - addExponentialHistogramMetric(ch, v, m, name, kv) + addExponentialHistogramMetric(ch, v, m, name, kv, c.validationScheme) case metricdata.ExponentialHistogram[float64]: - addExponentialHistogramMetric(ch, v, m, name, kv) + addExponentialHistogramMetric(ch, v, m, name, kv, c.validationScheme) case metricdata.Sum[int64]: - addSumMetric(ch, v, m, name, kv) + addSumMetric(ch, v, m, name, kv, c.validationScheme) case metricdata.Sum[float64]: - addSumMetric(ch, v, m, name, kv) + addSumMetric(ch, v, m, name, kv, c.validationScheme) case metricdata.Gauge[int64]: - addGaugeMetric(ch, v, m, name, kv) + addGaugeMetric(ch, v, m, name, kv, c.validationScheme) case metricdata.Gauge[float64]: - addGaugeMetric(ch, v, m, name, kv) + addGaugeMetric(ch, v, m, name, kv, c.validationScheme) } } } } +// downscaleExponentialBucket re-aggregates bucket 
counts when downscaling to a coarser resolution. +func downscaleExponentialBucket(bucket metricdata.ExponentialBucket, scaleDelta int32) metricdata.ExponentialBucket { + if len(bucket.Counts) == 0 || scaleDelta < 1 { + return metricdata.ExponentialBucket{ + Offset: bucket.Offset >> scaleDelta, + Counts: append([]uint64(nil), bucket.Counts...), // copy slice + } + } + + // The new offset is scaled down + newOffset := bucket.Offset >> scaleDelta + + // Pre-calculate the new bucket count to avoid growing slice + // Each group of 2^scaleDelta buckets will merge into one bucket + //nolint:gosec // Length is bounded by slice allocation + lastBucketIdx := bucket.Offset + int32(len(bucket.Counts)) - 1 + lastNewIdx := lastBucketIdx >> scaleDelta + newBucketCount := int(lastNewIdx - newOffset + 1) + + if newBucketCount <= 0 { + return metricdata.ExponentialBucket{ + Offset: newOffset, + Counts: []uint64{}, + } + } + + newCounts := make([]uint64, newBucketCount) + + // Merge buckets according to the scale difference + for i, count := range bucket.Counts { + if count == 0 { + continue + } + + // Calculate which new bucket this count belongs to + //nolint:gosec // Index is bounded by loop iteration + originalIdx := bucket.Offset + int32(i) + newIdx := originalIdx >> scaleDelta + + // Calculate the position in the new counts array + position := newIdx - newOffset + //nolint:gosec // Length is bounded by allocation + if position >= 0 && position < int32(len(newCounts)) { + newCounts[position] += count + } + } + + return metricdata.ExponentialBucket{ + Offset: newOffset, + Counts: newCounts, + } +} + func addExponentialHistogramMetric[N int64 | float64]( ch chan<- prometheus.Metric, histogram metricdata.ExponentialHistogram[N], m metricdata.Metrics, name string, kv keyVals, + validationScheme model.ValidationScheme, ) { for _, dp := range histogram.DataPoints { - keys, values := getAttrs(dp.Attributes) + keys, values := getAttrs(dp.Attributes, validationScheme) keys = append(keys, 
kv.keys...) values = append(values, kv.vals...) desc := prometheus.NewDesc(name, m.Description, keys, nil) + // Prometheus native histograms support scales in the range [-4, 8] + scale := dp.Scale + if scale < -4 { + // Reject scales below -4 as they cannot be represented in Prometheus + otel.Handle(fmt.Errorf( + "exponential histogram scale %d is below minimum supported scale -4, skipping data point", + scale)) + continue + } + + // If scale > 8, we need to downscale the buckets to match the clamped scale + positiveBucket := dp.PositiveBucket + negativeBucket := dp.NegativeBucket + if scale > 8 { + scaleDelta := scale - 8 + positiveBucket = downscaleExponentialBucket(dp.PositiveBucket, scaleDelta) + negativeBucket = downscaleExponentialBucket(dp.NegativeBucket, scaleDelta) + scale = 8 + } + // From spec: note that Prometheus Native Histograms buckets are indexed by upper boundary while Exponential Histograms are indexed by lower boundary, the result being that the Offset fields are different-by-one. positiveBuckets := make(map[int]int64) - for i, c := range dp.PositiveBucket.Counts { + for i, c := range positiveBucket.Counts { if c > math.MaxInt64 { otel.Handle(fmt.Errorf("positive count %d is too large to be represented as int64", c)) continue } - positiveBuckets[int(dp.PositiveBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above. + positiveBuckets[int(positiveBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above. } negativeBuckets := make(map[int]int64) - for i, c := range dp.NegativeBucket.Counts { + for i, c := range negativeBucket.Counts { if c > math.MaxInt64 { otel.Handle(fmt.Errorf("negative count %d is too large to be represented as int64", c)) continue } - negativeBuckets[int(dp.NegativeBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above. + negativeBuckets[int(negativeBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above. 
} m, err := prometheus.NewConstNativeHistogram( @@ -299,7 +360,7 @@ func addExponentialHistogramMetric[N int64 | float64]( positiveBuckets, negativeBuckets, dp.ZeroCount, - dp.Scale, + scale, dp.ZeroThreshold, dp.StartTime, values...) @@ -319,9 +380,10 @@ func addHistogramMetric[N int64 | float64]( m metricdata.Metrics, name string, kv keyVals, + validationScheme model.ValidationScheme, ) { for _, dp := range histogram.DataPoints { - keys, values := getAttrs(dp.Attributes) + keys, values := getAttrs(dp.Attributes, validationScheme) keys = append(keys, kv.keys...) values = append(values, kv.vals...) @@ -349,6 +411,7 @@ func addSumMetric[N int64 | float64]( m metricdata.Metrics, name string, kv keyVals, + validationScheme model.ValidationScheme, ) { valueType := prometheus.CounterValue if !sum.IsMonotonic { @@ -356,7 +419,7 @@ func addSumMetric[N int64 | float64]( } for _, dp := range sum.DataPoints { - keys, values := getAttrs(dp.Attributes) + keys, values := getAttrs(dp.Attributes, validationScheme) keys = append(keys, kv.keys...) values = append(values, kv.vals...) @@ -381,9 +444,10 @@ func addGaugeMetric[N int64 | float64]( m metricdata.Metrics, name string, kv keyVals, + validationScheme model.ValidationScheme, ) { for _, dp := range gauge.DataPoints { - keys, values := getAttrs(dp.Attributes) + keys, values := getAttrs(dp.Attributes, validationScheme) keys = append(keys, kv.keys...) values = append(values, kv.vals...) @@ -399,12 +463,12 @@ func addGaugeMetric[N int64 | float64]( // getAttrs converts the attribute.Set to two lists of matching Prometheus-style // keys and values. 
-func getAttrs(attrs attribute.Set) ([]string, []string) { +func getAttrs(attrs attribute.Set, validationScheme model.ValidationScheme) ([]string, []string) { keys := make([]string, 0, attrs.Len()) values := make([]string, 0, attrs.Len()) itr := attrs.Iter() - if model.NameValidationScheme == model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme. + if validationScheme == model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme. // Do not perform sanitization if prometheus supports UTF-8. for itr.Next() { kv := itr.Attribute() @@ -434,21 +498,17 @@ func getAttrs(attrs attribute.Set) ([]string, []string) { return keys, values } -func createInfoMetric(name, description string, res *resource.Resource) (prometheus.Metric, error) { - keys, values := getAttrs(*res.Set()) +func createInfoMetric(name, description string, res *resource.Resource, validationScheme model.ValidationScheme) (prometheus.Metric, error) { + keys, values := getAttrs(*res.Set(), validationScheme) desc := prometheus.NewDesc(name, description, keys, nil) return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...) } -func createScopeInfoMetric(scope instrumentation.Scope) (prometheus.Metric, error) { - attrs := make([]attribute.KeyValue, 0, scope.Attributes.Len()+2) // resource attrs + scope name + scope version - attrs = append(attrs, scope.Attributes.ToSlice()...) - attrs = append(attrs, attribute.String(scopeNameLabel, scope.Name)) - attrs = append(attrs, attribute.String(scopeVersionLabel, scope.Version)) - - keys, values := getAttrs(attribute.NewSet(attrs...)) - desc := prometheus.NewDesc(scopeInfoMetricName, scopeInfoDescription, keys, nil) - return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...) 
+func unitMapGetOrDefault(unit string) string { + if promUnit, ok := unitSuffixes[unit]; ok { + return promUnit + } + return unit } var unitSuffixes = map[string]string{ @@ -490,7 +550,7 @@ var unitSuffixes = map[string]string{ // getName returns the sanitized name, prefixed with the namespace and suffixed with unit. func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string { name := m.Name - if model.NameValidationScheme != model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme. + if c.validationScheme != model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme. // Only sanitize if prometheus does not support UTF-8. logDeprecatedLegacyScheme() name = model.EscapeName(name, model.NameEscapingScheme) @@ -509,7 +569,7 @@ func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string { if c.namespace != "" { name = c.namespace + name } - if suffix, ok := unitSuffixes[m.Unit]; ok && !c.withoutUnits && !strings.HasSuffix(name, suffix) { + if suffix := unitMapGetOrDefault(m.Unit); suffix != "" && !c.withoutUnits && !strings.HasSuffix(name, suffix) { name += "_" + suffix } if addCounterSuffix { @@ -552,34 +612,10 @@ func (c *collector) createResourceAttributes(res *resource.Resource) { defer c.mu.Unlock() resourceAttrs, _ := res.Set().Filter(c.resourceAttributesFilter) - resourceKeys, resourceValues := getAttrs(resourceAttrs) + resourceKeys, resourceValues := getAttrs(resourceAttrs, c.validationScheme) c.resourceKeyVals = keyVals{keys: resourceKeys, vals: resourceValues} } -func (c *collector) scopeInfo(scope instrumentation.Scope) (prometheus.Metric, error) { - c.mu.Lock() - defer c.mu.Unlock() - - scopeInfo, ok := c.scopeInfos[scope] - if ok { - return scopeInfo, nil - } - - if _, ok := c.scopeInfosInvalid[scope]; ok { - return nil, errScopeInvalid - } - - scopeInfo, err := createScopeInfoMetric(scope) - if err != nil { - 
c.scopeInfosInvalid[scope] = struct{}{} - return nil, fmt.Errorf("cannot create scope info metric: %w", err) - } - - c.scopeInfos[scope] = scopeInfo - - return scopeInfo, nil -} - func (c *collector) validateMetrics(name, description string, metricType *dto.MetricType) (drop bool, help string) { c.mu.Lock() defer c.mu.Unlock() @@ -634,7 +670,8 @@ func addExemplars[N int64 | float64](m prometheus.Metric, exemplars []metricdata Labels: labels, } } - metricWithExemplar, err := prometheus.NewMetricWithExemplars(m, promExemplars...) + // TODO: Parameterize name validation scheme. + metricWithExemplar, err := newMetricWithExemplars(m, model.UTF8Validation, promExemplars...) if err != nil { // If there are errors creating the metric with exemplars, just warn // and return the metric without exemplars. diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter_globalvalidationscheme.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter_globalvalidationscheme.go new file mode 100644 index 00000000000..0d71a9e8677 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter_globalvalidationscheme.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build !localvalidationscheme + +package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus" + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" +) + +func newMetricWithExemplars(m prometheus.Metric, scheme model.ValidationScheme, exemplars ...prometheus.Exemplar) (prometheus.Metric, error) { + return prometheus.NewMetricWithExemplars(m, exemplars...) 
+} diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter_localvalidationscheme.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter_localvalidationscheme.go new file mode 100644 index 00000000000..d8a61093470 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter_localvalidationscheme.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build localvalidationscheme + +package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus" + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" +) + +func newMetricWithExemplars(m prometheus.Metric, scheme model.ValidationScheme, exemplars ...prometheus.Exemplar) (prometheus.Metric, error) { + return prometheus.NewMetricWithExemplars(m, scheme, exemplars...) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go index ebb9a0463b3..0a48aed74dd 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go @@ -202,7 +202,7 @@ func (r *PeriodicReader) aggregation( // collectAndExport gather all metric data related to the periodicReader r from // the SDK and exports it with r's exporter. func (r *PeriodicReader) collectAndExport(ctx context.Context) error { - ctx, cancel := context.WithTimeout(ctx, r.timeout) + ctx, cancel := context.WithTimeoutCause(ctx, r.timeout, errors.New("reader collect and export timeout")) defer cancel() // TODO (#3047): Use a sync.Pool or persistent pointer instead of allocating rm every Collect. @@ -278,7 +278,7 @@ func (r *PeriodicReader) ForceFlush(ctx context.Context) error { // Prioritize the ctx timeout if it is set. 
if _, ok := ctx.Deadline(); !ok { var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, r.timeout) + ctx, cancel = context.WithTimeoutCause(ctx, r.timeout, errors.New("reader force flush timeout")) defer cancel() } @@ -311,7 +311,7 @@ func (r *PeriodicReader) Shutdown(ctx context.Context) error { // Prioritize the ctx timeout if it is set. if _, ok := ctx.Deadline(); !ok { var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, r.timeout) + ctx, cancel = context.WithTimeoutCause(ctx, r.timeout, errors.New("reader shutdown timeout")) defer cancel() } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go index 2240c26e9b4..7bdb699cae0 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go @@ -121,6 +121,14 @@ func (p *pipeline) addMultiCallback(c multiCallback) (unregister func()) { // // This method is safe to call concurrently. func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) error { + // Only check if context is already cancelled before starting, not inside or after callback loops. + // If this method returns after executing some callbacks but before running all aggregations, + // internal aggregation state can be corrupted and result in incorrect data returned + // by future produce calls. + if err := ctx.Err(); err != nil { + return err + } + p.Lock() defer p.Unlock() @@ -130,12 +138,6 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) if e := c(ctx); e != nil { err = errors.Join(err, e) } - if err := ctx.Err(); err != nil { - rm.Resource = nil - clear(rm.ScopeMetrics) // Erase elements to let GC collect objects. - rm.ScopeMetrics = rm.ScopeMetrics[:0] - return err - } } for e := p.multiCallbacks.Front(); e != nil; e = e.Next() { // TODO make the callbacks parallel. 
( #3034 ) @@ -143,13 +145,6 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) if e := f(ctx); e != nil { err = errors.Join(err, e) } - if err := ctx.Err(); err != nil { - // This means the context expired before we finished running callbacks. - rm.Resource = nil - clear(rm.ScopeMetrics) // Erase elements to let GC collect objects. - rm.ScopeMetrics = rm.ScopeMetrics[:0] - return err - } } rm.Resource = p.resource diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go index cda142c7ea2..0e5adc1a766 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go @@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" // version is the current release version of the metric SDK in use. func version() string { - return "1.36.0" + return "1.37.0" } diff --git a/vendor/modules.txt b/vendor/modules.txt index c1af96cda5a..6bf73df1dea 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1099,7 +1099,7 @@ github.com/pmezard/go-difflib/difflib # github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c ## explicit; go 1.14 github.com/power-devops/perfstat -# github.com/prometheus/alertmanager v0.28.1 => github.com/grafana/prometheus-alertmanager v0.25.1-0.20250620093340-be61a673dee6 +# github.com/prometheus/alertmanager v0.28.1 => github.com/juliusmh/alertmanager v0.26.1-0.20250624114102-96969065f8d2 ## explicit; go 1.23.0 github.com/prometheus/alertmanager/api github.com/prometheus/alertmanager/api/metrics @@ -1169,7 +1169,7 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint/validations # github.com/prometheus/client_model v0.6.2 ## explicit; go 1.22.0 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.65.1-0.20250711183725-0e1982f10d4c +# github.com/prometheus/common v0.65.1-0.20250714091050-c6ae72fb63e9 ## explicit; go 1.23.0 
github.com/prometheus/common/config github.com/prometheus/common/expfmt @@ -1509,7 +1509,7 @@ go.opentelemetry.io/collector/semconv/v1.6.1 # go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 ## explicit; go 1.23.0 go.opentelemetry.io/contrib/bridges/otelzap -# go.opentelemetry.io/contrib/bridges/prometheus v0.61.0 +# go.opentelemetry.io/contrib/bridges/prometheus v0.61.0 => github.com/aknuds1/opentelemetry-go-contrib/bridges/prometheus v0.0.0-20250716061915-e4a04e1efdd8 ## explicit; go 1.23.0 go.opentelemetry.io/contrib/bridges/prometheus # go.opentelemetry.io/contrib/detectors/gcp v1.36.0 @@ -1540,7 +1540,7 @@ go.opentelemetry.io/contrib/propagators/jaeger ## explicit; go 1.23.0 go.opentelemetry.io/contrib/samplers/jaegerremote go.opentelemetry.io/contrib/samplers/jaegerremote/internal/utils -# go.opentelemetry.io/otel v1.37.0 +# go.opentelemetry.io/otel v1.37.0 => github.com/aknuds1/opentelemetry-go v0.0.0-20250714105753-6d10dabef4d5 ## explicit; go 1.23.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -1608,7 +1608,7 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry -# go.opentelemetry.io/otel/exporters/prometheus v0.58.0 +# go.opentelemetry.io/otel/exporters/prometheus v0.58.0 => github.com/aknuds1/opentelemetry-go/exporters/prometheus v0.0.0-20250714105753-6d10dabef4d5 ## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/prometheus # go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2 @@ -1646,7 +1646,7 @@ go.opentelemetry.io/otel/sdk/trace/tracetest # go.opentelemetry.io/otel/sdk/log v0.12.2 ## explicit; go 1.23.0 go.opentelemetry.io/otel/sdk/log -# go.opentelemetry.io/otel/sdk/metric v1.36.0 +# go.opentelemetry.io/otel/sdk/metric v1.37.0 ## explicit; go 1.23.0 
go.opentelemetry.io/otel/sdk/metric go.opentelemetry.io/otel/sdk/metric/exemplar @@ -2113,12 +2113,15 @@ sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 sigs.k8s.io/yaml/goyaml.v3 # github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v1.8.2-0.20250725113505-6dd7af9abc56 +# github.com/prometheus/alertmanager => github.com/juliusmh/alertmanager v0.26.1-0.20250624114102-96969065f8d2 # github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20250428154222-f7d51a6f6700 # gopkg.in/yaml.v3 => github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094 # github.com/grafana/regexp => github.com/grafana/regexp v0.0.0-20240531075221-3685f1377d7b # github.com/munnerz/goautoneg => github.com/grafana/goautoneg v0.0.0-20240607115440-f335c04c58ce # github.com/opentracing-contrib/go-stdlib => github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956 # github.com/opentracing-contrib/go-grpc => github.com/charleskorn/go-grpc v0.0.0-20231024023642-e9298576254f -# github.com/prometheus/alertmanager => github.com/grafana/prometheus-alertmanager v0.25.1-0.20250620093340-be61a673dee6 # github.com/prometheus/otlptranslator => github.com/grafana/mimir-otlptranslator v0.0.0-20250703083430-c31a9568ad96 # github.com/thanos-io/objstore => github.com/charleskorn/objstore v0.0.0-20250527065533-21d4c0c463eb +# go.opentelemetry.io/contrib/bridges/prometheus => github.com/aknuds1/opentelemetry-go-contrib/bridges/prometheus v0.0.0-20250716061915-e4a04e1efdd8 +# go.opentelemetry.io/otel/exporters/prometheus => github.com/aknuds1/opentelemetry-go/exporters/prometheus v0.0.0-20250714105753-6d10dabef4d5 +# go.opentelemetry.io/otel => github.com/aknuds1/opentelemetry-go v0.0.0-20250714105753-6d10dabef4d5 From ed9101a17d62d5105c2103bf8827231240969e29 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Wed, 16 Jul 2025 11:30:18 +0200 Subject: [PATCH 02/10] Don't change prometheus/common API Signed-off-by: Arve Knudsen --- 
.../scripts/run-integration-tests-group.sh | 4 +- .../workflows/scripts/run-unit-tests-group.sh | 2 +- .golangci.yml | 1 - Makefile | 66 +-- .../mimir-ingest-storage/compose-up.sh | 2 +- .../mimir-microservices-mode/compose-up.sh | 4 +- .../mimir-monolithic-mode/compose-up.sh | 2 +- go.mod | 14 +- go.sum | 68 +-- pkg/cardinality/request.go | 2 +- pkg/distributor/validate.go | 5 +- pkg/frontend/querymiddleware/error_caching.go | 3 +- .../request_validation_test.go | 1 + pkg/mimir/modules.go | 1 + pkg/mimirtool/commands/analyse_rulefiles.go | 8 +- pkg/mimirtool/commands/rules.go | 19 +- pkg/mimirtool/rules/parser.go | 15 +- pkg/mimirtool/rules/rules.go | 9 +- pkg/querier/querier.go | 1 + pkg/ruler/ruler.go | 5 + .../operators/aggregations/count_values.go | 2 +- .../operators/functions/label.go | 8 +- tools/benchmark-query-engine/main.go | 2 +- .../prometheus/common/expfmt/decode.go | 20 +- .../expfmt/decode_globalvalidationscheme.go | 48 -- .../expfmt/decode_localvalidationscheme.go | 52 --- .../prometheus/common/model/alert.go | 6 +- .../model/alert_globalvalidationscheme.go | 21 - .../model/alert_localvalidationscheme.go | 21 - .../prometheus/common/model/labels.go | 41 +- .../model/labels_globalvalidationscheme.go | 53 --- .../model/labels_localvalidationscheme.go | 55 --- .../prometheus/common/model/labelset.go | 27 +- .../model/labelset_globalvalidationscheme.go | 45 -- .../model/labelset_localvalidationscheme.go | 46 -- .../prometheus/common/model/metric.go | 48 +- .../model/metric_globalvalidationscheme.go | 43 -- .../model/metric_localvalidationscheme.go | 22 - .../prometheus/common/model/silence.go | 9 +- .../model/silence_globalvalidationscheme.go | 26 -- .../model/silence_localvalidationscheme.go | 26 -- .../prometheus/common/promslog/slog.go | 14 - .../prometheus/prometheus/config/config.go | 69 +-- .../prometheus/discovery/manager.go | 22 +- .../{labels_slicelabels.go => labels.go} | 0 .../prometheus/model/labels/validate.go | 32 ++ 
.../prometheus/model/relabel/relabel.go | 26 +- .../prometheus/model/rulefmt/rulefmt.go | 44 +- .../prometheus/prometheus/notifier/manager.go | 11 +- .../prometheus/prometheus/promql/durations.go | 14 +- .../prometheus/prometheus/promql/engine.go | 105 +++-- .../prometheus/prometheus/promql/functions.go | 419 +++++++++--------- .../promql/parser/generated_parser.y | 6 +- .../promql/parser/generated_parser.y.go | 6 +- .../prometheus/promql/parser/parse.go | 11 + .../promqltest/testdata/aggregators.test | 152 ++----- .../promqltest/testdata/at_modifier.test | 3 +- .../promql/promqltest/testdata/collision.test | 3 +- .../promql/promqltest/testdata/functions.test | 189 ++------ .../promqltest/testdata/histograms.test | 121 +---- .../promql/promqltest/testdata/limit.test | 8 +- .../testdata/name_label_dropping.test | 3 +- .../testdata/native_histograms.test | 225 +++------- .../promql/promqltest/testdata/operators.test | 231 +++------- .../promql/promqltest/testdata/subquery.test | 2 - .../prometheus/prometheus/promql/value.go | 8 +- .../prometheus/prometheus/scrape/manager.go | 5 +- .../prometheus/prometheus/scrape/scrape.go | 37 +- .../prometheusremotewrite/helper.go | 11 +- .../prometheusremotewrite/metrics_to_prw.go | 4 +- .../storage/remote/write_handler.go | 7 +- .../prometheus/template/template.go | 13 +- .../tsdb/chunkenc/float_histogram.go | 134 +++--- .../prometheus/tsdb/chunkenc/histogram.go | 134 +++--- .../tsdb/chunkenc/histogram_meta.go | 163 +++++-- .../prometheus/prometheus/tsdb/compact.go | 8 + .../util/annotations/annotations.go | 2 +- .../prometheus/prometheus/web/api/v1/api.go | 3 +- .../contrib/bridges/prometheus/producer.go | 10 +- .../otel/exporters/prometheus/config.go | 21 +- .../otel/exporters/prometheus/exporter.go | 219 ++++----- .../exporter_globalvalidationscheme.go | 15 - .../exporter_localvalidationscheme.go | 15 - .../otel/sdk/metric/periodic_reader.go | 6 +- .../otel/sdk/metric/pipeline.go | 21 +- .../otel/sdk/metric/version.go | 2 +- 
.../google.golang.org/api/internal/version.go | 2 +- vendor/modules.txt | 19 +- 88 files changed, 1309 insertions(+), 2119 deletions(-) delete mode 100644 vendor/github.com/prometheus/common/expfmt/decode_globalvalidationscheme.go delete mode 100644 vendor/github.com/prometheus/common/expfmt/decode_localvalidationscheme.go delete mode 100644 vendor/github.com/prometheus/common/model/alert_globalvalidationscheme.go delete mode 100644 vendor/github.com/prometheus/common/model/alert_localvalidationscheme.go delete mode 100644 vendor/github.com/prometheus/common/model/labels_globalvalidationscheme.go delete mode 100644 vendor/github.com/prometheus/common/model/labels_localvalidationscheme.go delete mode 100644 vendor/github.com/prometheus/common/model/labelset_globalvalidationscheme.go delete mode 100644 vendor/github.com/prometheus/common/model/labelset_localvalidationscheme.go delete mode 100644 vendor/github.com/prometheus/common/model/metric_globalvalidationscheme.go delete mode 100644 vendor/github.com/prometheus/common/model/metric_localvalidationscheme.go delete mode 100644 vendor/github.com/prometheus/common/model/silence_globalvalidationscheme.go delete mode 100644 vendor/github.com/prometheus/common/model/silence_localvalidationscheme.go rename vendor/github.com/prometheus/prometheus/model/labels/{labels_slicelabels.go => labels.go} (100%) create mode 100644 vendor/github.com/prometheus/prometheus/model/labels/validate.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter_globalvalidationscheme.go delete mode 100644 vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter_localvalidationscheme.go diff --git a/.github/workflows/scripts/run-integration-tests-group.sh b/.github/workflows/scripts/run-integration-tests-group.sh index 4d43668216b..cdd0759687c 100755 --- a/.github/workflows/scripts/run-integration-tests-group.sh +++ b/.github/workflows/scripts/run-integration-tests-group.sh @@ -37,7 +37,7 @@ if [[ -z "$TOTAL" ]]; 
then fi # List all tests. -ALL_TESTS=$(go test -tags=requires_docker,stringlabels,localvalidationscheme -list 'Test.*' "${INTEGRATION_DIR}/..." | grep -E '^Test.*' | sort) +ALL_TESTS=$(go test -tags=requires_docker,stringlabels -list 'Test.*' "${INTEGRATION_DIR}/..." | grep -E '^Test.*' | sort) # Filter tests by the requested group. GROUP_TESTS=$(echo "$ALL_TESTS" | awk -v TOTAL="$TOTAL" -v INDEX="$INDEX" 'NR % TOTAL == INDEX') @@ -58,4 +58,4 @@ REGEX="${REGEX})$" # that integration tests will fail on data races. export MIMIR_ENV_VARS_JSON='{"GORACE": "halt_on_error=1"}' -exec go test -tags=requires_docker,stringlabels,localvalidationscheme -timeout 2400s -v -count=1 -run "${REGEX}" "${INTEGRATION_DIR}/..." +exec go test -tags=requires_docker,stringlabels -timeout 2400s -v -count=1 -run "${REGEX}" "${INTEGRATION_DIR}/..." diff --git a/.github/workflows/scripts/run-unit-tests-group.sh b/.github/workflows/scripts/run-unit-tests-group.sh index b82bd1b5668..5959f3ddd37 100755 --- a/.github/workflows/scripts/run-unit-tests-group.sh +++ b/.github/workflows/scripts/run-unit-tests-group.sh @@ -47,4 +47,4 @@ echo "$GROUP_TESTS" echo "" # shellcheck disable=SC2086 # we *want* word splitting of GROUP_TESTS. 
-exec go test -tags=netgo,stringlabels,localvalidationscheme -timeout 30m -race ${GROUP_TESTS} +exec go test -tags=netgo,stringlabels -timeout 30m -race ${GROUP_TESTS} diff --git a/.golangci.yml b/.golangci.yml index cbeee530971..2a37d5be725 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -10,7 +10,6 @@ run: - stringlabels - requires_docker - requires_libpcap - - localvalidationscheme output: formats: text: diff --git a/Makefile b/Makefile index 210073f773f..07726872cf5 100644 --- a/Makefile +++ b/Makefile @@ -261,7 +261,7 @@ GO_FLAGS := -ldflags "\ -X $(MIMIR_VERSION).Branch=$(GIT_BRANCH) \ -X $(MIMIR_VERSION).Revision=$(GIT_REVISION) \ -X $(MIMIR_VERSION).Version=$(VERSION) \ - -extldflags \"-static\" -s -w" -tags netgo,stringlabels,localvalidationscheme + -extldflags \"-static\" -s -w" -tags netgo,stringlabels ifeq ($(BUILD_IN_CONTAINER),true) @@ -318,7 +318,7 @@ lint: check-makefiles golangci-lint run # Ensure no blocklisted package is imported. - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/bmizerany/assert=github.com/stretchr/testify/assert,\ + GOFLAGS="-tags=requires_docker,stringlabels" faillint -paths "github.com/bmizerany/assert=github.com/stretchr/testify/assert,\ golang.org/x/net/context=context,\ sync/atomic=go.uber.org/atomic,\ regexp=github.com/grafana/regexp,\ @@ -328,31 +328,31 @@ lint: check-makefiles github.com/weaveworks/common/user.{ExtractOrgIDFromHTTPRequest}=github.com/grafana/mimir/pkg/tenant.{ExtractTenantIDFromHTTPRequest}" ./pkg/... ./cmd/... ./tools/... ./integration/... # Ensure clean pkg structure. - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "\ + faillint -paths "\ github.com/grafana/mimir/pkg/scheduler,\ github.com/grafana/mimir/pkg/frontend,\ github.com/grafana/mimir/pkg/frontend/transport,\ github.com/grafana/mimir/pkg/frontend/v1,\ github.com/grafana/mimir/pkg/frontend/v2" \ ./pkg/querier/... 
- GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/grafana/mimir/pkg/querier/..." ./pkg/scheduler/... - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/grafana/mimir/pkg/storage/tsdb/..." ./pkg/storage/bucket/... - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/alertmanager/alertspb/... - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/ruler/rulespb/... - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/storage/sharding/... - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/querier/engine/... - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/querier/api/... - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/util/math/... + faillint -paths "github.com/grafana/mimir/pkg/querier/..." ./pkg/scheduler/... + faillint -paths "github.com/grafana/mimir/pkg/storage/tsdb/..." ./pkg/storage/bucket/... + faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/alertmanager/alertspb/... + faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/ruler/rulespb/... + faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/storage/sharding/... + faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/querier/engine/... + faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/querier/api/... + faillint -paths "github.com/grafana/mimir/pkg/..." ./pkg/util/math/... 
# Ensure all errors are reported as APIError - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/weaveworks/common/httpgrpc.{Errorf}=github.com/grafana/mimir/pkg/api/error.Newf" ./pkg/frontend/querymiddleware/... + faillint -paths "github.com/weaveworks/common/httpgrpc.{Errorf}=github.com/grafana/mimir/pkg/api/error.Newf" ./pkg/frontend/querymiddleware/... # errors.Cause() only work on errors wrapped by github.com/pkg/errors, while it doesn't work # on errors wrapped by golang standard errors package. In Mimir we currently use github.com/pkg/errors # but other vendors we depend on (e.g. Prometheus) just uses the standard errors package. # For this reason, we recommend to not use errors.Cause() anywhere, so that we don't have to # question whether the usage is safe or not. - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/pkg/errors.{Cause}" ./pkg/... ./cmd/... ./tools/... ./integration/... + faillint -paths "github.com/pkg/errors.{Cause}" ./pkg/... ./cmd/... ./tools/... ./integration/... # gogo/status allows to easily customize error details while grpc/status doesn't: # for this reason we use gogo/status in several places. However, gogo/status.FromError() @@ -361,13 +361,13 @@ lint: check-makefiles # Since we want support for errors wrapping everywhere, to avoid subtle bugs depending # on which status package is imported, we don't allow .FromError() from both packages # and we require to use grpcutil.ErrorToStatus() instead. - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "\ + faillint -paths "\ google.golang.org/grpc/status.{FromError}=github.com/grafana/dskit/grpcutil.ErrorToStatus,\ github.com/gogo/status.{FromError}=github.com/grafana/dskit/grpcutil.ErrorToStatus" \ ./pkg/... ./cmd/... ./tools/... ./integration/... 
# Ensure the query path is supporting multiple tenants - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "\ + faillint -paths "\ github.com/grafana/mimir/pkg/tenant.{TenantID}=github.com/grafana/mimir/pkg/tenant.{TenantIDs}" \ ./pkg/scheduler/... \ ./pkg/frontend/... \ @@ -375,7 +375,7 @@ lint: check-makefiles ./pkg/frontend/querymiddleware/... # Ensure packages that no longer use a global logger don't reintroduce it - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/grafana/mimir/pkg/util/log.{Logger}" \ + faillint -paths "github.com/grafana/mimir/pkg/util/log.{Logger}" \ ./pkg/alertmanager/... \ ./pkg/compactor/... \ ./pkg/distributor/... \ @@ -390,22 +390,22 @@ lint: check-makefiles # We've copied github.com/NYTimes/gziphandler to pkg/util/gziphandler # at least until https://github.com/nytimes/gziphandler/pull/112 is merged - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "github.com/NYTimes/gziphandler" \ + faillint -paths "github.com/NYTimes/gziphandler" \ ./pkg/... ./cmd/... ./tools/... ./integration/... # We don't want to use yaml.v2 anywhere, because we use yaml.v3 now, # and UnamrshalYAML signature is not compatible between them. - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths "gopkg.in/yaml.v2" \ + faillint -paths "gopkg.in/yaml.v2" \ ./pkg/... ./cmd/... ./tools/... ./integration/... # Ensure packages we imported from Thanos are no longer used. - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths \ + GOFLAGS="-tags=requires_docker,stringlabels" faillint -paths \ "github.com/thanos-io/thanos/pkg/..." \ ./pkg/... ./cmd/... ./tools/... ./integration/... # Ensure we never use the default registerer and we allow to use a custom one (improves testability). 
# Also, ensure we use promauto.With() to reduce the chances we forget to register metrics. - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths \ + faillint -paths \ "github.com/prometheus/client_golang/prometheus/promauto.{NewCounter,NewCounterVec,NewCounterFunc,NewGauge,NewGaugeVec,NewGaugeFunc,NewSummary,NewSummaryVec,NewHistogram,NewHistogramVec}=github.com/prometheus/client_golang/prometheus/promauto.With,\ github.com/prometheus/client_golang/prometheus.{MustRegister,Register,DefaultRegisterer}=github.com/prometheus/client_golang/prometheus/promauto.With,\ github.com/prometheus/client_golang/prometheus.{NewCounter,NewCounterVec,NewCounterFunc,NewGauge,NewGaugeVec,NewGaugeFunc,NewSummary,NewSummaryVec,NewHistogram,NewHistogramVec}=github.com/prometheus/client_golang/prometheus/promauto.With" \ @@ -414,7 +414,7 @@ lint: check-makefiles # Use the faster slices.Sort where we can. # Note that we don't automatically suggest replacing sort.Float64s() with slices.Sort() as the documentation for slices.Sort() # at the time of writing warns that slices.Sort() may not correctly handle NaN values. - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths \ + faillint -paths \ "sort.{Strings,Ints}=slices.Sort" \ ./pkg/... ./cmd/... ./tools/... ./integration/... @@ -425,44 +425,44 @@ lint: check-makefiles # Don't use generic ring.Read operation. # ring.Read usually isn't the right choice, and we prefer that each component define its operations explicitly. - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths \ + faillint -paths \ "github.com/grafana/dskit/ring.{Read}" \ ./pkg/... ./cmd/... ./tools/... ./integration/... # Do not directly call flag.Parse() and argument getters, to try to reduce risk of misuse. 
- GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths \ + faillint -paths \ "flag.{Parse,NArg,Arg,Args}=github.com/grafana/dskit/flagext.{ParseFlagsAndArguments,ParseFlagsWithoutArguments}" \ ./pkg/... ./cmd/... ./tools/... ./integration/... # Ensure we use our custom gRPC clients. - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths \ + faillint -paths \ "github.com/grafana/mimir/pkg/storegateway/storegatewaypb.{NewStoreGatewayClient}=github.com/grafana/mimir/pkg/storegateway/storegatewaypb.NewCustomStoreGatewayClient" \ ./pkg/... ./cmd/... ./tools/... ./integration/... # Prefer using WithCancelCause in production code, so that cancelled contexts have more information available from context.Cause(ctx). - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -ignore-tests -paths \ + faillint -ignore-tests -paths \ "context.{WithCancel}=context.WithCancelCause" \ ./pkg/... ./cmd/... ./tools/... ./integration/... # Do not use the object storage client intended only for tools within Mimir itself - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths \ + faillint -paths \ "github.com/grafana/mimir/pkg/util/objtools" \ ./pkg/... ./cmd/... ./integration/... # Use the more performant metadata.ValueFromIncomingContext wherever possible (if not possible, we can always put # a lint ignore directive to skip linting). - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths \ + faillint -paths \ "google.golang.org/grpc/metadata.{FromIncomingContext}=google.golang.org/grpc/metadata.ValueFromIncomingContext" \ ./pkg/... ./cmd/... ./integration/... # We don't use topic auto-creation because we don't control the num.partitions. # As a result the topic can be created with the wrong number of partitions. 
- GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths \ + faillint -paths \ "github.com/twmb/franz-go/pkg/kgo.{AllowAutoTopicCreation}" \ ./pkg/... ./cmd/... ./tools/... ./integration/... # We don't use opentracing anymore. - GOFLAGS="-tags=requires_docker,stringlabels,localvalidationscheme" faillint -paths \ + faillint -paths \ "github.com/opentracing/opentracing-go,github.com/opentracing/opentracing-go/log,github.com/uber/jaeger-client-go,github.com/opentracing-contrib/go-stdlib/nethttp" \ ./pkg/... ./cmd/... ./tools/... ./integration/... @@ -485,12 +485,12 @@ print-go-version: ## Print the go version. @go version | awk '{print $$3}' | sed 's/go//' test-with-race: ## Run all unit tests with data race detect. - go test -tags netgo,stringlabels,localvalidationscheme -timeout 30m -race -count 1 ./... + go test -tags netgo,stringlabels -timeout 30m -race -count 1 ./... cover: ## Run all unit tests with code coverage and generates reports. $(eval COVERDIR := $(shell mktemp -d coverage.XXXXXXXXXX)) $(eval COVERFILE := $(shell mktemp $(COVERDIR)/unit.XXXXXXXXXX)) - go test -tags netgo,stringlabels,localvalidationscheme -timeout 30m -race -count 1 -coverprofile=$(COVERFILE) ./... + go test -tags netgo,stringlabels -timeout 30m -race -count 1 -coverprofile=$(COVERFILE) ./... go tool cover -html=$(COVERFILE) -o cover.html go tool cover -func=cover.html | tail -n1 @@ -778,12 +778,12 @@ check-mimir-read-write-mode-docker-compose-yaml: ## Check the jsonnet and docker integration-tests: ## Run all integration tests. integration-tests: cmd/mimir/$(UPTODATE) - go test -tags=requires_docker,stringlabels,localvalidationscheme ./integration/... + go test -tags=requires_docker,stringlabels ./integration/... integration-tests-race: ## Run all integration tests with race-enabled distroless docker image. 
integration-tests-race: export MIMIR_IMAGE=$(IMAGE_PREFIX)mimir:$(IMAGE_TAG_RACE) integration-tests-race: cmd/mimir/$(UPTODATE_RACE) - go test -timeout 30m -tags=requires_docker,stringlabels,localvalidationscheme ./integration/... + go test -timeout 30m -tags=requires_docker,stringlabels ./integration/... # Those vars are needed for packages target export VERSION diff --git a/development/mimir-ingest-storage/compose-up.sh b/development/mimir-ingest-storage/compose-up.sh index c0018b92b8d..b3d9d1c7a13 100755 --- a/development/mimir-ingest-storage/compose-up.sh +++ b/development/mimir-ingest-storage/compose-up.sh @@ -20,7 +20,7 @@ cd "$SCRIPT_DIR" && make # -gcflags "all=-N -l" disables optimizations that allow for better run with combination with Delve debugger. # GOARCH is not changed. -CGO_ENABLED=0 GOOS=linux go build -mod=vendor -tags=netgo,stringlabels,localvalidationscheme -gcflags "all=-N -l" -o "${SCRIPT_DIR}"/mimir "${SCRIPT_DIR}"/../../cmd/mimir +CGO_ENABLED=0 GOOS=linux go build -mod=vendor -tags=netgo,stringlabels -gcflags "all=-N -l" -o "${SCRIPT_DIR}"/mimir "${SCRIPT_DIR}"/../../cmd/mimir docker_compose -f "${SCRIPT_DIR}"/docker-compose.yml build --build-arg BUILD_IMAGE="${BUILD_IMAGE}" mimir-write-zone-a-0 docker_compose -f "${SCRIPT_DIR}"/docker-compose.yml up "$@" diff --git a/development/mimir-microservices-mode/compose-up.sh b/development/mimir-microservices-mode/compose-up.sh index b69e4fe43ae..63d184f6026 100755 --- a/development/mimir-microservices-mode/compose-up.sh +++ b/development/mimir-microservices-mode/compose-up.sh @@ -22,12 +22,12 @@ cd "$SCRIPT_DIR" && make # -gcflags "all=-N -l" disables optimizations that allow for better run with combination with Delve debugger. # GOARCH is not changed. 
-CGO_ENABLED=0 GOOS=linux go build -mod=vendor -tags=netgo,stringlabels,localvalidationscheme -gcflags "all=-N -l" -o "${SCRIPT_DIR}"/mimir "${SCRIPT_DIR}"/../../cmd/mimir +CGO_ENABLED=0 GOOS=linux go build -mod=vendor -tags=netgo,stringlabels -gcflags "all=-N -l" -o "${SCRIPT_DIR}"/mimir "${SCRIPT_DIR}"/../../cmd/mimir docker_compose -f "${SCRIPT_DIR}"/docker-compose.yml build --build-arg BUILD_IMAGE="${BUILD_IMAGE}" distributor-1 if [ "$(yq '.services.query-tee' "${SCRIPT_DIR}"/docker-compose.yml)" != "null" ]; then # If query-tee is enabled, build its binary and image as well. - CGO_ENABLED=0 GOOS=linux go build -mod=vendor -tags=netgo,stringlabels,localvalidationscheme -gcflags "all=-N -l" -o "${SCRIPT_DIR}"/../../cmd/query-tee "${SCRIPT_DIR}"/../../cmd/query-tee + CGO_ENABLED=0 GOOS=linux go build -mod=vendor -tags=netgo,stringlabels -gcflags "all=-N -l" -o "${SCRIPT_DIR}"/../../cmd/query-tee "${SCRIPT_DIR}"/../../cmd/query-tee docker_compose -f "${SCRIPT_DIR}"/docker-compose.yml build --build-arg BUILD_IMAGE="${BUILD_IMAGE}" query-tee fi diff --git a/development/mimir-monolithic-mode/compose-up.sh b/development/mimir-monolithic-mode/compose-up.sh index 9c01d8948eb..e0bc61bf838 100755 --- a/development/mimir-monolithic-mode/compose-up.sh +++ b/development/mimir-monolithic-mode/compose-up.sh @@ -42,7 +42,7 @@ fi # -gcflags "all=-N -l" disables optimizations that allow for better run with combination with Delve debugger. # GOARCH is not changed. 
-CGO_ENABLED=0 GOOS=linux go build -mod=vendor -tags=netgo,stringlabels,localvalidationscheme -gcflags "all=-N -l" -o "${SCRIPT_DIR}"/mimir "${SCRIPT_DIR}"/../../cmd/mimir +CGO_ENABLED=0 GOOS=linux go build -mod=vendor -tags=netgo,stringlabels -gcflags "all=-N -l" -o "${SCRIPT_DIR}"/mimir "${SCRIPT_DIR}"/../../cmd/mimir docker_compose -f "${SCRIPT_DIR}"/docker-compose.yml build mimir-1 && \ docker_compose -f "${SCRIPT_DIR}"/docker-compose.yml "${PROFILES[@]}" up "${ARGS[@]}" diff --git a/go.mod b/go.mod index e2784933aa9..d1626baa431 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,7 @@ require ( github.com/prometheus/alertmanager v0.28.1 github.com/prometheus/client_golang v1.23.0-rc.1 github.com/prometheus/client_model v0.6.2 - github.com/prometheus/common v0.65.1-0.20250714091050-c6ae72fb63e9 + github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3 github.com/prometheus/prometheus v1.99.0 github.com/segmentio/fasthash v1.0.3 github.com/sirupsen/logrus v1.9.3 @@ -93,7 +93,7 @@ require ( go.opentelemetry.io/proto/otlp v1.6.0 go.uber.org/multierr v1.11.0 golang.org/x/term v0.32.0 - google.golang.org/api v0.239.0 + google.golang.org/api v0.238.0 google.golang.org/protobuf v1.36.6 sigs.k8s.io/kustomize/kyaml v0.18.1 ) @@ -202,7 +202,7 @@ require ( go.opentelemetry.io/otel/log v0.12.2 // indirect go.opentelemetry.io/otel/log/logtest v0.0.0-20250528051624-65b8067f18f1 // indirect go.opentelemetry.io/otel/sdk/log v0.12.2 // indirect - go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect gopkg.in/mail.v2 v2.3.1 // indirect gopkg.in/telebot.v3 v3.2.1 // indirect @@ -343,7 +343,7 @@ require ( sigs.k8s.io/yaml v1.4.0 // indirect ) -replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v1.8.2-0.20250725113505-6dd7af9abc56 +replace github.com/prometheus/prometheus => 
github.com/grafana/mimir-prometheus v1.8.2-0.20250717103207-acaa45ca4d40 // https://github.com/grafana/prometheus-alertmanager/pull/118 replace github.com/prometheus/alertmanager => github.com/juliusmh/alertmanager v0.26.1-0.20250624114102-96969065f8d2 @@ -378,9 +378,3 @@ replace github.com/prometheus/otlptranslator => github.com/grafana/mimir-otlptra // Replace objstore with a fork containing https://github.com/thanos-io/objstore/pull/181. replace github.com/thanos-io/objstore => github.com/charleskorn/objstore v0.0.0-20250527065533-21d4c0c463eb - -replace go.opentelemetry.io/contrib/bridges/prometheus => github.com/aknuds1/opentelemetry-go-contrib/bridges/prometheus v0.0.0-20250716061915-e4a04e1efdd8 - -replace go.opentelemetry.io/otel/exporters/prometheus => github.com/aknuds1/opentelemetry-go/exporters/prometheus v0.0.0-20250714105753-6d10dabef4d5 - -replace go.opentelemetry.io/otel => github.com/aknuds1/opentelemetry-go v0.0.0-20250714105753-6d10dabef4d5 diff --git a/go.sum b/go.sum index 4bf8b488e99..58a36810ce2 100644 --- a/go.sum +++ b/go.sum @@ -126,12 +126,6 @@ github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Sereal/Sereal/Go/sereal v0.0.0-20231009093132-b9187f1a92c6/go.mod h1:JwrycNnC8+sZPDyzM3MQ86LvaGzSpfxg885KOOwFRW4= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/aknuds1/opentelemetry-go v0.0.0-20250714105753-6d10dabef4d5 h1:VmoJU/HB75Anuqd6lSeTz+7oROgFsFC/8BRjK7b/Vto= -github.com/aknuds1/opentelemetry-go v0.0.0-20250714105753-6d10dabef4d5/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= -github.com/aknuds1/opentelemetry-go-contrib/bridges/prometheus v0.0.0-20250716061915-e4a04e1efdd8 h1:J6DHtSSL7iCTwCyYASS9E/ee2jEAHvhPn3ndpyt1hWU= -github.com/aknuds1/opentelemetry-go-contrib/bridges/prometheus v0.0.0-20250716061915-e4a04e1efdd8/go.mod 
h1:99W6ktc5NGVXAJ6atj2cZ/oZTVyDdHinFf60q13ktL8= -github.com/aknuds1/opentelemetry-go/exporters/prometheus v0.0.0-20250714105753-6d10dabef4d5 h1:NfkvUV/EKTJ8LoX411bFXTXMNJKqgWd2lT81Nn5nRLY= -github.com/aknuds1/opentelemetry-go/exporters/prometheus v0.0.0-20250714105753-6d10dabef4d5/go.mod h1:R8GpRXTZrqvXHDEGVH5bF6+JqAZcK8PjJcZ5nGhEWiE= github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= github.com/alecthomas/chroma/v2 v2.19.0 h1:Im+SLRgT8maArxv81mULDWN8oKxkzboH07CHesxElq4= @@ -279,14 +273,14 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczC github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/digitalocean/godo v1.157.0 h1:ReELaS6FxXNf8gryUiVH0wmyUmZN8/NCmBX4gXd3F0o= -github.com/digitalocean/godo v1.157.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM= +github.com/digitalocean/godo v1.152.0 h1:WRgkPMogZSXEJK70IkZKTB/PsMn16hMQ+NI3wCIQdzA= +github.com/digitalocean/godo v1.152.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ= github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= -github.com/docker/docker v28.3.0+incompatible h1:ffS62aKWupCWdvcee7nBU9fhnmknOqDPaJAMtfK0ImQ= -github.com/docker/docker v28.3.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= 
+github.com/docker/docker v28.2.2+incompatible h1:CjwRSksz8Yo4+RmQ339Dp/D2tGO5JxwYeqtMOEe0LDw= +github.com/docker/docker v28.2.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 h1:IPrmumsT9t5BS7XcPhgsCTlkWbYg80SEXUzDpReaU6Y= github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -577,8 +571,8 @@ github.com/grafana/memberlist v0.3.1-0.20250428154222-f7d51a6f6700 h1:0t7iOQ5ZkB github.com/grafana/memberlist v0.3.1-0.20250428154222-f7d51a6f6700/go.mod h1:Ri9p/tRShbjYnpNf4FFPXG7wxEGY4Nrcn6E7jrVa//4= github.com/grafana/mimir-otlptranslator v0.0.0-20250703083430-c31a9568ad96 h1:kq5zJVW9LyFOB5xCeQPTON2HNjwwEkefhegZXGIhQPk= github.com/grafana/mimir-otlptranslator v0.0.0-20250703083430-c31a9568ad96/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI= -github.com/grafana/mimir-prometheus v1.8.2-0.20250725113505-6dd7af9abc56 h1:HdQyhMJ+AkDUTlFAU7qIIkC3C2n0R/cV8T2S7JnbK7U= -github.com/grafana/mimir-prometheus v1.8.2-0.20250725113505-6dd7af9abc56/go.mod h1:bi1IiCulyFfPIsfFMaCqlggqiLO4PyqNwK/DiqTaYDI= +github.com/grafana/mimir-prometheus v1.8.2-0.20250717103207-acaa45ca4d40 h1:OJyH1LqzHc1tHOxRQsu2pHxejVgErhh6r472wwyun6A= +github.com/grafana/mimir-prometheus v1.8.2-0.20250717103207-acaa45ca4d40/go.mod h1:MulFQg8pjFVYGJZACg+4he5Z6BnrlTYLCMfTDKp8uGc= github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956 h1:em1oddjXL8c1tL0iFdtVtPloq2hRPen2MJQKoAWpxu0= github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/grafana/otel-profiling-go v0.5.1 h1:stVPKAFZSa7eGiqbYuG25VcqYksR6iWvF3YH66t4qL8= @@ -755,8 +749,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug 
v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/linode/linodego v1.52.2 h1:N9ozU27To1LMSrDd8WvJZ5STSz1eGYdyLnxhAR/dIZg= -github.com/linode/linodego v1.52.2/go.mod h1:bI949fZaVchjWyKIA08hNyvAcV6BAS+PM2op3p7PAWA= +github.com/linode/linodego v1.52.1 h1:HJ1cz1n9n3chRP9UrtqmP91+xTi0Q5l+H/4z4tpkwgQ= +github.com/linode/linodego v1.52.1/go.mod h1:zEN2sX+cSdp67EuRY1HJiyuLujoa7HqvVwNEcJv3iXw= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= @@ -878,7 +872,6 @@ github.com/pires/go-proxyproto v0.7.0 h1:IukmRewDQFWC7kfnb66CSomk2q/seBuilHBYFwy github.com/pires/go-proxyproto v0.7.0/go.mod h1:Vz/1JPY/OACxWGQNIRY2BeyDmpoaWmEP40O9LbuiFR4= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -913,8 +906,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= 
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.65.1-0.20250714091050-c6ae72fb63e9 h1:W+mk95PFPPi5NOzr2MtiGe7BXlHmsxs7UESIGsW5S08= -github.com/prometheus/common v0.65.1-0.20250714091050-c6ae72fb63e9/go.mod h1:41VB7D5p4TG2i2w5P4G62ofoS2mVyeTQ9QIAKYE60TE= +github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3 h1:R/zO7ombSHCI8bjQusgCMSL+cE669w5/R2upq5WlPD0= +github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg= @@ -937,7 +930,6 @@ github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= @@ -1115,6 +1107,8 @@ go.opentelemetry.io/collector/semconv v0.128.0 h1:MzYOz7Vgb3Kf5D7b49pqqgeUhEmOCu go.opentelemetry.io/collector/semconv v0.128.0/go.mod h1:OPXer4l43X23cnjLXIZnRj/qQOjSuq4TgBLI76P9hns= go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 h1:u2E32P7j1a/gRgZDWhIXC+Shd4rLg70mnE7QLI/Ssnw= go.opentelemetry.io/contrib/bridges/otelzap v0.11.0/go.mod h1:pJPCLM8gzX4ASqLlyAXjHBEYxgbOQJ/9bidWxD6PEPQ= 
+go.opentelemetry.io/contrib/bridges/prometheus v0.61.0 h1:RyrtJzu5MAmIcbRrwg75b+w3RlZCP0vJByDVzcpAe3M= +go.opentelemetry.io/contrib/bridges/prometheus v0.61.0/go.mod h1:tirr4p9NXbzjlbruiRGp53IzlYrDk5CO2fdHj0sSSaY= go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw= go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k= go.opentelemetry.io/contrib/exporters/autoexport v0.61.0 h1:XfzKtKSrbtYk9TNCF8dkO0Y9M7IOfb4idCwBOTwGBiI= @@ -1129,6 +1123,9 @@ go.opentelemetry.io/contrib/propagators/jaeger v1.35.0 h1:UIrZgRBHUrYRlJ4V419lVb go.opentelemetry.io/contrib/propagators/jaeger v1.35.0/go.mod h1:0ciyFyYZxE6JqRAQvIgGRabKWDUmNdW3GAQb6y/RlFU= go.opentelemetry.io/contrib/samplers/jaegerremote v0.30.0 h1:bQ1Gvah4Sp8z7epSkgJaNTuZm7sutfA6Fji2/7cKFMc= go.opentelemetry.io/contrib/samplers/jaegerremote v0.30.0/go.mod h1:9b8Q9rH52NgYH3ShiTFB5wf18Vt3RTH/VMB7LDcC1ug= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 h1:06ZeJRe5BnYXceSM9Vya83XXVaNGe3H1QqsvqRANQq8= @@ -1145,6 +1142,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 h1:JgtbA go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0/go.mod h1:179AK5aar5R3eS9FucPy6rggvU0g52cvKId8pv4+v0c= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod 
h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ= +go.opentelemetry.io/otel/exporters/prometheus v0.58.0 h1:CJAxWKFIqdBennqxJyOgnt5LqkeFRT+Mz3Yjz3hL+h8= +go.opentelemetry.io/otel/exporters/prometheus v0.58.0/go.mod h1:7qo/4CLI+zYSNbv0GMNquzuss2FVZo3OYrGh96n4HNc= go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2 h1:12vMqzLLNZtXuXbJhSENRg+Vvx+ynNilV8twBLBsXMY= go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2/go.mod h1:ZccPZoPOoq8x3Trik/fCsba7DEYDUnN6yX79pgp2BUQ= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY= @@ -1165,10 +1164,9 @@ go.opentelemetry.io/otel/sdk/log v0.12.2 h1:yNoETvTByVKi7wHvYS6HMcZrN5hFLD7I++1x go.opentelemetry.io/otel/sdk/log v0.12.2/go.mod h1:DcpdmUXHJgSqN/dh+XMWa7Vf89u9ap0/AAk/XGLnEzY= go.opentelemetry.io/otel/sdk/log/logtest v0.0.0-20250521073539-a85ae98dcedc h1:uqxdywfHqqCl6LmZzI3pUnXT1RGFYyUgxj0AkWPFxi0= go.opentelemetry.io/otel/sdk/log/logtest v0.0.0-20250521073539-a85ae98dcedc/go.mod h1:TY/N/FT7dmFrP/r5ym3g0yysP1DefqGpAZr4f82P0dE= -go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= -go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -1199,9 +1197,6 @@ golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod 
h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1250,9 +1245,6 @@ golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1310,9 +1302,6 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= 
-golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1353,8 +1342,6 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1454,13 +1441,8 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= golang.org/x/sys v0.33.0/go.mod 
h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= -golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= @@ -1468,9 +1450,6 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1486,9 +1465,6 @@ golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.16.0/go.mod 
h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1558,8 +1534,6 @@ golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1615,8 +1589,8 @@ google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRR google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko= -google.golang.org/api v0.239.0 h1:2hZKUnFZEy81eugPs4e2XzIJ5SOwQg0G82bpXD65Puo= -google.golang.org/api v0.239.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50= +google.golang.org/api v0.238.0 h1:+EldkglWIg/pWjkq97sd+XxH7PxakNYoe/rkSTbnvOs= +google.golang.org/api v0.238.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= diff --git a/pkg/cardinality/request.go b/pkg/cardinality/request.go index 3d837c54039..b42db2e5984 100644 --- a/pkg/cardinality/request.go +++ b/pkg/cardinality/request.go @@ -263,7 +263,7 @@ func extractLabelNames(values url.Values) ([]model.LabelName, error) { labelNames := make([]model.LabelName, 0, len(labelNamesParams)) for _, labelNameParam := range labelNamesParams { labelName := model.LabelName(labelNameParam) - if !labelName.IsValid(model.UTF8Validation) { + if !labelName.IsValid() { return nil, fmt.Errorf("invalid 'label_names' param '%v'", labelNameParam) } labelNames = append(labelNames, labelName) diff --git a/pkg/distributor/validate.go b/pkg/distributor/validate.go index 8a52f43cee0..5cbd5a767a0 100644 --- a/pkg/distributor/validate.go +++ b/pkg/distributor/validate.go @@ -16,6 +16,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" "github.com/grafana/mimir/pkg/costattribution" "github.com/grafana/mimir/pkg/mimirpb" @@ -424,7 +425,7 @@ func validateLabels(m *sampleValidationMetrics, cfg labelValidationConfig, userI validationScheme := cfg.ValidationScheme(userID) - if !model.IsValidMetricName(model.LabelValue(unsafeMetricName), validationScheme) { + if !labels.IsValidMetricName(unsafeMetricName, validationScheme) { cat.IncrementDiscardedSamples(ls, 1, reasonInvalidMetricName, ts) m.invalidMetricName.WithLabelValues(userID, group).Inc() return fmt.Errorf(invalidMetricNameMsgFormat, removeNonASCIIChars(unsafeMetricName)) @@ -450,7 +451,7 @@ func validateLabels(m *sampleValidationMetrics, cfg labelValidationConfig, userI maxLabelValueLength := cfg.MaxLabelValueLength(userID) lastLabelName := "" for _, l := range ls { - if !skipLabelValidation && !model.LabelName(l.Name).IsValid(validationScheme) { + if !skipLabelValidation && !labels.IsValidLabelName(l.Name, 
validationScheme) { m.invalidLabel.WithLabelValues(userID, group).Inc() cat.IncrementDiscardedSamples(ls, 1, reasonInvalidLabel, ts) return fmt.Errorf(invalidLabelMsgFormat, l.Name, mimirpb.FromLabelAdaptersToString(ls)) diff --git a/pkg/frontend/querymiddleware/error_caching.go b/pkg/frontend/querymiddleware/error_caching.go index cb5cf3a80bf..417bb86bf23 100644 --- a/pkg/frontend/querymiddleware/error_caching.go +++ b/pkg/frontend/querymiddleware/error_caching.go @@ -15,7 +15,6 @@ import ( "github.com/grafana/dskit/tracing" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/common/model" apierror "github.com/grafana/mimir/pkg/api/error" "github.com/grafana/mimir/pkg/util/spanlogger" @@ -197,7 +196,7 @@ func (e *errorCachingHandler) isCacheable(apiErr *apierror.APIError) (bool, stri func addWithExemplar(ctx context.Context, counter prometheus.Counter, val float64) { if traceID, traceOK := tracing.ExtractSampledTraceID(ctx); traceOK { - counter.(prometheus.ExemplarAdder).AddWithExemplar(val, prometheus.Labels{"trace_id": traceID, "traceID": traceID}, model.UTF8Validation) + counter.(prometheus.ExemplarAdder).AddWithExemplar(val, prometheus.Labels{"trace_id": traceID, "traceID": traceID}) } else { // If there is no trace ID, just add to the counter. 
counter.Add(val) diff --git a/pkg/frontend/querymiddleware/request_validation_test.go b/pkg/frontend/querymiddleware/request_validation_test.go index 54e0d9234af..6d10251f979 100644 --- a/pkg/frontend/querymiddleware/request_validation_test.go +++ b/pkg/frontend/querymiddleware/request_validation_test.go @@ -208,6 +208,7 @@ func TestCardinalityQueryRequestValidationRoundTripper(t *testing.T) { expectedErrType: apierror.TypeBadData, }, { + // TODO: Check // non-legacy label name will be accepted url: cardinalityLabelValuesPathSuffix + "?label_names[]=\\xbd\\xb2\\x3d\\xbc\\x20\\xe2\\x8c\\x98", expectedErrType: "", diff --git a/pkg/mimir/modules.go b/pkg/mimir/modules.go index 4b476d7b87e..31294c1de83 100644 --- a/pkg/mimir/modules.go +++ b/pkg/mimir/modules.go @@ -821,6 +821,7 @@ func (t *Mimir) initQueryFrontendTripperware() (serv services.Service, err error panic(fmt.Sprintf("invalid config not caught by validation: unknown PromQL engine '%s'", t.Cfg.Querier.QueryEngine)) } + // TODO: Decide whether this is a good idea. eng = streamingpromqlcompat.NameValidatingEngine(eng, t.Overrides) tripperware, err := querymiddleware.NewTripperware( diff --git a/pkg/mimirtool/commands/analyse_rulefiles.go b/pkg/mimirtool/commands/analyse_rulefiles.go index b75d24b474f..aafd62aae69 100644 --- a/pkg/mimirtool/commands/analyse_rulefiles.go +++ b/pkg/mimirtool/commands/analyse_rulefiles.go @@ -22,8 +22,8 @@ type RuleFileAnalyzeCommand struct { } func (cmd *RuleFileAnalyzeCommand) run(_ *kingpin.ParseContext) error { - - output, err := AnalyzeRuleFiles(cmd.RuleFilesList) + // TODO: Get scheme from CLI flag. + output, err := AnalyzeRuleFiles(cmd.RuleFilesList, model.LegacyValidation) if err != nil { return err } @@ -37,11 +37,11 @@ func (cmd *RuleFileAnalyzeCommand) run(_ *kingpin.ParseContext) error { } // AnalyzeRuleFiles analyze rules files and return the list metrics used in them. 
-func AnalyzeRuleFiles(ruleFiles []string) (*analyze.MetricsInRuler, error) { +func AnalyzeRuleFiles(ruleFiles []string, scheme model.ValidationScheme) (*analyze.MetricsInRuler, error) { output := &analyze.MetricsInRuler{} output.OverallMetrics = make(map[string]struct{}) - nss, err := rules.ParseFiles(rules.MimirBackend, ruleFiles) + nss, err := rules.ParseFiles(rules.MimirBackend, ruleFiles, scheme) if err != nil { return nil, errors.Wrap(err, "analyze operation unsuccessful, unable to parse rules files") } diff --git a/pkg/mimirtool/commands/rules.go b/pkg/mimirtool/commands/rules.go index c55c3b99695..2b85a714676 100644 --- a/pkg/mimirtool/commands/rules.go +++ b/pkg/mimirtool/commands/rules.go @@ -19,6 +19,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/rulefmt" "github.com/prometheus/prometheus/promql/parser" log "github.com/sirupsen/logrus" @@ -475,7 +476,8 @@ func (r *RuleCommand) deleteRuleGroup(_ *kingpin.ParseContext) error { } func (r *RuleCommand) loadRules(_ *kingpin.ParseContext) error { - nss, err := rules.ParseFiles(r.Backend, r.RuleFilesList) + // TODO: Get scheme from CLI flag. + nss, err := rules.ParseFiles(r.Backend, r.RuleFilesList, model.LegacyValidation) if err != nil { return errors.Wrap(err, "load operation unsuccessful, unable to parse rules files") } @@ -544,7 +546,8 @@ func (r *RuleCommand) diffRules(_ *kingpin.ParseContext) error { return errors.Wrap(err, "diff operation unsuccessful, invalid arguments") } - nss, err := rules.ParseFiles(r.Backend, r.RuleFilesList) + // TODO: Get scheme from CLI flag. 
+ nss, err := rules.ParseFiles(r.Backend, r.RuleFilesList, model.LegacyValidation) if err != nil { return errors.Wrap(err, "diff operation unsuccessful, unable to parse rules files") } @@ -612,7 +615,8 @@ func (r *RuleCommand) syncRules(_ *kingpin.ParseContext) error { return errors.Wrap(err, "sync operation unsuccessful, invalid arguments") } - nss, err := rules.ParseFiles(r.Backend, r.RuleFilesList) + // TODO: Get scheme from CLI flag. + nss, err := rules.ParseFiles(r.Backend, r.RuleFilesList, model.LegacyValidation) if err != nil { return errors.Wrap(err, "sync operation unsuccessful, unable to parse rules files") } @@ -720,7 +724,8 @@ func (r *RuleCommand) prepare(_ *kingpin.ParseContext) error { return errors.Wrap(err, "prepare operation unsuccessful, invalid arguments") } - namespaces, err := rules.ParseFiles(r.Backend, r.RuleFilesList) + // TODO: Get scheme from CLI flag. + namespaces, err := rules.ParseFiles(r.Backend, r.RuleFilesList, model.LegacyValidation) if err != nil { return errors.Wrap(err, "prepare operation unsuccessful, unable to parse rules files") } @@ -758,7 +763,8 @@ func (r *RuleCommand) lint(_ *kingpin.ParseContext) error { return errors.Wrap(err, "prepare operation unsuccessful, invalid arguments") } - namespaces, err := rules.ParseFiles(r.Backend, r.RuleFilesList) + // TODO: Get scheme from CLI flag. + namespaces, err := rules.ParseFiles(r.Backend, r.RuleFilesList, model.LegacyValidation) if err != nil { return errors.Wrap(err, "prepare operation unsuccessful, unable to parse rules files") } @@ -792,7 +798,8 @@ func (r *RuleCommand) checkRules(_ *kingpin.ParseContext) error { return errors.Wrap(err, "check operation unsuccessful, invalid arguments") } - namespaces, err := rules.ParseFiles(r.Backend, r.RuleFilesList) + // TODO: Get scheme from CLI flag. 
+ namespaces, err := rules.ParseFiles(r.Backend, r.RuleFilesList, model.LegacyValidation) if err != nil { return errors.Wrap(err, "check operation unsuccessful, unable to parse rules files") } diff --git a/pkg/mimirtool/rules/parser.go b/pkg/mimirtool/rules/parser.go index 9263328a39c..01b84148406 100644 --- a/pkg/mimirtool/rules/parser.go +++ b/pkg/mimirtool/rules/parser.go @@ -14,6 +14,7 @@ import ( "path/filepath" "strings" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/rulefmt" log "github.com/sirupsen/logrus" yaml "gopkg.in/yaml.v3" @@ -29,9 +30,9 @@ var ( ) // ParseFiles returns a formatted set of prometheus rule groups -func ParseFiles(backend string, files []string) (map[string]RuleNamespace, error) { +func ParseFiles(backend string, files []string, scheme model.ValidationScheme) (map[string]RuleNamespace, error) { ruleSet := map[string]RuleNamespace{} - var parseFn func(f string) ([]RuleNamespace, []error) + var parseFn func(f string, scheme model.ValidationScheme) ([]RuleNamespace, []error) switch backend { case MimirBackend: parseFn = Parse @@ -40,7 +41,7 @@ func ParseFiles(backend string, files []string) (map[string]RuleNamespace, error } for _, f := range files { - nss, errs := parseFn(f) + nss, errs := parseFn(f, scheme) for _, err := range errs { log.WithError(err).WithField("file", f).Errorln("unable to parse rules file") return nil, errFileReadError @@ -72,17 +73,17 @@ func ParseFiles(backend string, files []string) (map[string]RuleNamespace, error } // Parse parses and validates a set of rules. 
-func Parse(f string) ([]RuleNamespace, []error) { +func Parse(f string, scheme model.ValidationScheme) ([]RuleNamespace, []error) { content, err := loadFile(f) if err != nil { log.WithError(err).WithField("file", f).Errorln("unable to load rules file") return nil, []error{errFileReadError} } - return ParseBytes(content) + return ParseBytes(content, scheme) } -func ParseBytes(content []byte) ([]RuleNamespace, []error) { +func ParseBytes(content []byte, scheme model.ValidationScheme) ([]RuleNamespace, []error) { decoder := yaml.NewDecoder(bytes.NewReader(content)) decoder.KnownFields(true) @@ -117,7 +118,7 @@ func ParseBytes(content []byte) ([]RuleNamespace, []error) { return nil, []error{err} } - if errs := ns.Validate(node.GroupNodes); len(errs) > 0 { + if errs := ns.Validate(node.GroupNodes, scheme); len(errs) > 0 { return nil, errs } } diff --git a/pkg/mimirtool/rules/rules.go b/pkg/mimirtool/rules/rules.go index df03ef620a8..1a68fab9576 100644 --- a/pkg/mimirtool/rules/rules.go +++ b/pkg/mimirtool/rules/rules.go @@ -228,7 +228,7 @@ func prepareBinaryExpr(e *parser.BinaryExpr, label string, rule string) error { } // Validate each rule in the rule namespace is valid -func (r RuleNamespace) Validate(groupNodes []rulefmt.RuleGroupNode) []error { +func (r RuleNamespace) Validate(groupNodes []rulefmt.RuleGroupNode, scheme model.ValidationScheme) []error { set := map[string]struct{}{} var errs []error @@ -246,18 +246,17 @@ func (r RuleNamespace) Validate(groupNodes []rulefmt.RuleGroupNode) []error { set[g.Name] = struct{}{} - errs = append(errs, ValidateRuleGroup(g, groupNodes[i])...) + errs = append(errs, ValidateRuleGroup(g, groupNodes[i], scheme)...) 
} return errs } // ValidateRuleGroup validates a rulegroup -func ValidateRuleGroup(g rwrulefmt.RuleGroup, node rulefmt.RuleGroupNode) []error { +func ValidateRuleGroup(g rwrulefmt.RuleGroup, node rulefmt.RuleGroupNode, scheme model.ValidationScheme) []error { var errs []error for i, r := range g.Rules { - // TODO(juliusmh): fixed to legacy validation - for _, err := range r.Validate(node.Rules[i], model.LegacyValidation) { + for _, err := range r.Validate(node.Rules[i], scheme) { var ruleName string if r.Alert != "" { ruleName = r.Alert diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index f7c752baca7..77b0a4b5d5a 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -203,6 +203,7 @@ func New(cfg Config, limits *validation.Overrides, distributor Distributor, quer panic(fmt.Sprintf("invalid config not caught by validation: unknown PromQL engine '%s'", cfg.QueryEngine)) } + // TODO: Check whether this approach is a good idea. eng = compat.NameValidatingEngine(eng, limits) return NewSampleAndChunkQueryable(lazyQueryable), exemplarQueryable, eng, nil } diff --git a/pkg/ruler/ruler.go b/pkg/ruler/ruler.go index 01e24c99e0e..ce77431702f 100644 --- a/pkg/ruler/ruler.go +++ b/pkg/ruler/ruler.go @@ -1353,6 +1353,11 @@ func (r *Ruler) IsMaxRuleGroupsLimited(userID, namespace string) bool { return r.limits.RulerMaxRuleGroupsPerTenant(userID, namespace) > 0 } +// NameValidationScheme returns the validation scheme to use for a particular tenant. +func (r *Ruler) NameValidationScheme(userID string) model.ValidationScheme { + return r.limits.ValidationScheme(userID) +} + // AssertMaxRuleGroups limit has not been reached compared to the current // number of total rule groups in input and returns an error if so. 
func (r *Ruler) AssertMaxRuleGroups(userID, namespace string, rg int) error { diff --git a/pkg/streamingpromql/operators/aggregations/count_values.go b/pkg/streamingpromql/operators/aggregations/count_values.go index a782798d7fe..7900e7d5c96 100644 --- a/pkg/streamingpromql/operators/aggregations/count_values.go +++ b/pkg/streamingpromql/operators/aggregations/count_values.go @@ -158,7 +158,7 @@ func (c *CountValues) SeriesMetadata(ctx context.Context) ([]types.SeriesMetadat func (c *CountValues) loadLabelName() error { c.resolvedLabelName = c.LabelName.GetValue() - if !model.LabelName(c.resolvedLabelName).IsValid(c.nameValidationScheme) { + if !labels.IsValidLabelName(c.resolvedLabelName, c.nameValidationScheme) { return fmt.Errorf("invalid label name %q", c.resolvedLabelName) } diff --git a/pkg/streamingpromql/operators/functions/label.go b/pkg/streamingpromql/operators/functions/label.go index dd378445258..7ce5acad41c 100644 --- a/pkg/streamingpromql/operators/functions/label.go +++ b/pkg/streamingpromql/operators/functions/label.go @@ -21,15 +21,15 @@ import ( func LabelJoinFactory(dstLabelOp, separatorOp types.StringOperator, srcLabelOps []types.StringOperator, validationScheme model.ValidationScheme) SeriesMetadataFunction { return func(seriesMetadata []types.SeriesMetadata, tracker *limiter.MemoryConsumptionTracker) ([]types.SeriesMetadata, error) { dst := dstLabelOp.GetValue() - if !model.LabelName(dst).IsValid(validationScheme) { + if !labels.IsValidLabelName(dst, validationScheme) { return nil, fmt.Errorf("invalid destination label name in label_join(): %s", dst) } separator := separatorOp.GetValue() srcLabels := make([]string, len(srcLabelOps)) for i, op := range srcLabelOps { src := op.GetValue() - if !model.LabelName(src).IsValid(validationScheme) { - return nil, fmt.Errorf("invalid source label name in label_join(): %s", dst) + if !labels.IsValidLabelName(src, validationScheme) { + return nil, fmt.Errorf("invalid source label name in label_join(): 
%s", src) } srcLabels[i] = src } @@ -71,7 +71,7 @@ func LabelReplaceFactory(dstLabelOp, replacementOp, srcLabelOp, regexOp types.St return nil, fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr) } dst := dstLabelOp.GetValue() - if !model.LabelName(dst).IsValid(validationScheme) { + if !labels.IsValidLabelName(dst, validationScheme) { return nil, fmt.Errorf("invalid destination label name in label_replace(): %s", dst) } repl := replacementOp.GetValue() diff --git a/tools/benchmark-query-engine/main.go b/tools/benchmark-query-engine/main.go index 581c8fe44e8..bce00cf0eca 100644 --- a/tools/benchmark-query-engine/main.go +++ b/tools/benchmark-query-engine/main.go @@ -203,7 +203,7 @@ func (a *app) buildBinary() error { a.binaryPath = filepath.Join(a.tempDir, "benchmark-binary") - cmd := exec.Command("go", "test", "-c", "-o", a.binaryPath, "-tags", "stringlabels,localvalidationscheme", ".") + cmd := exec.Command("go", "test", "-c", "-o", a.binaryPath, "-tags", "stringlabels", ".") cmd.Dir = a.benchmarkPackageDir cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 8e1b02f01af..1448439b7f7 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -14,6 +14,7 @@ package expfmt import ( + "bufio" "fmt" "io" "math" @@ -69,6 +70,21 @@ func ResponseFormat(h http.Header) Format { return FmtUnknown } +// NewDecoder returns a new decoder based on the given input format. +// If the input format does not imply otherwise, a text format decoder is returned. +func NewDecoder(r io.Reader, format Format) Decoder { + switch format.FormatType() { + case TypeProtoDelim: + return &protoDecoder{r: bufio.NewReader(r)} + } + return &textDecoder{r: r} +} + +// protoDecoder implements the Decoder interface for protocol buffers. 
+type protoDecoder struct { + r protodelim.Reader +} + // Decode implements the Decoder interface. func (d *protoDecoder) Decode(v *dto.MetricFamily) error { opts := protodelim.UnmarshalOptions{ @@ -77,7 +93,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { if err := opts.UnmarshalFrom(d.r, v); err != nil { return err } - if !d.isValidMetricName(v.GetName()) { + if !model.IsValidMetricName(model.LabelValue(v.GetName())) { return fmt.Errorf("invalid metric name %q", v.GetName()) } for _, m := range v.GetMetric() { @@ -91,7 +107,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { if !model.LabelValue(l.GetValue()).IsValid() { return fmt.Errorf("invalid label value %q", l.GetValue()) } - if !d.isValidLabelName(l.GetName()) { + if !model.LabelName(l.GetName()).IsValid() { return fmt.Errorf("invalid label name %q", l.GetName()) } } diff --git a/vendor/github.com/prometheus/common/expfmt/decode_globalvalidationscheme.go b/vendor/github.com/prometheus/common/expfmt/decode_globalvalidationscheme.go deleted file mode 100644 index ffb8b6ed8a5..00000000000 --- a/vendor/github.com/prometheus/common/expfmt/decode_globalvalidationscheme.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2025 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build !localvalidationscheme - -package expfmt - -import ( - "bufio" - "io" - - "google.golang.org/protobuf/encoding/protodelim" - - "github.com/prometheus/common/model" -) - -// protoDecoder implements the Decoder interface for protocol buffers. -type protoDecoder struct { - r protodelim.Reader -} - -// NewDecoder returns a new decoder based on the given input format. -// If the input format does not imply otherwise, a text format decoder is returned. -func NewDecoder(r io.Reader, format Format) Decoder { - switch format.FormatType() { - case TypeProtoDelim: - return &protoDecoder{r: bufio.NewReader(r)} - } - return &textDecoder{r: r} -} - -func (d *protoDecoder) isValidMetricName(name string) bool { - return model.IsValidMetricName(model.LabelValue(name)) -} - -func (d *protoDecoder) isValidLabelName(name string) bool { - return model.LabelName(name).IsValid() -} diff --git a/vendor/github.com/prometheus/common/expfmt/decode_localvalidationscheme.go b/vendor/github.com/prometheus/common/expfmt/decode_localvalidationscheme.go deleted file mode 100644 index 8e0dbb59648..00000000000 --- a/vendor/github.com/prometheus/common/expfmt/decode_localvalidationscheme.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2025 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build localvalidationscheme - -package expfmt - -import ( - "bufio" - "io" - - "google.golang.org/protobuf/encoding/protodelim" - - "github.com/prometheus/common/model" -) - -// protoDecoder implements the Decoder interface for protocol buffers. -type protoDecoder struct { - r protodelim.Reader - validationScheme model.ValidationScheme -} - -// NewDecoder returns a new decoder based on the given input format. -// If the input format does not imply otherwise, a text format decoder is returned. -func NewDecoder(r io.Reader, format Format, validationScheme model.ValidationScheme) Decoder { - switch format.FormatType() { - case TypeProtoDelim: - return &protoDecoder{ - r: bufio.NewReader(r), - validationScheme: validationScheme, - } - } - return &textDecoder{r: r} -} - -func (d *protoDecoder) isValidMetricName(name string) bool { - return model.IsValidMetricName(model.LabelValue(name), d.validationScheme) -} - -func (d *protoDecoder) isValidLabelName(name string) bool { - return model.LabelName(name).IsValid(d.validationScheme) -} diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index 2cb61932bad..460f554f294 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -88,20 +88,20 @@ func (a *Alert) StatusAt(ts time.Time) AlertStatus { } // Validate checks whether the alert data is inconsistent. 
-func (a *Alert) validate(scheme ValidationScheme) error { +func (a *Alert) Validate() error { if a.StartsAt.IsZero() { return errors.New("start time missing") } if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { return errors.New("start time must be before end time") } - if err := a.Labels.validate(scheme); err != nil { + if err := a.Labels.Validate(); err != nil { return fmt.Errorf("invalid label set: %w", err) } if len(a.Labels) == 0 { return errors.New("at least one label pair required") } - if err := a.Annotations.validate(scheme); err != nil { + if err := a.Annotations.Validate(); err != nil { return fmt.Errorf("invalid annotations: %w", err) } return nil diff --git a/vendor/github.com/prometheus/common/model/alert_globalvalidationscheme.go b/vendor/github.com/prometheus/common/model/alert_globalvalidationscheme.go deleted file mode 100644 index b71b84eb23d..00000000000 --- a/vendor/github.com/prometheus/common/model/alert_globalvalidationscheme.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2025 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !localvalidationscheme - -package model - -// Validate checks whether the alert data is inconsistent. 
-func (a *Alert) Validate() error { - return a.validate(NameValidationScheme) -} diff --git a/vendor/github.com/prometheus/common/model/alert_localvalidationscheme.go b/vendor/github.com/prometheus/common/model/alert_localvalidationscheme.go deleted file mode 100644 index eb3e7ff76ae..00000000000 --- a/vendor/github.com/prometheus/common/model/alert_localvalidationscheme.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2025 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build localvalidationscheme - -package model - -// Validate checks whether the alert data is inconsistent. -func (a *Alert) Validate(scheme ValidationScheme) error { - return a.validate(scheme) -} diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index 112d61c9db7..e2ff835950d 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -19,8 +19,6 @@ import ( "regexp" "strings" "unicode/utf8" - - "gopkg.in/yaml.v2" ) const ( @@ -105,17 +103,20 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") // therewith. type LabelName string -func (ln LabelName) isValid(scheme ValidationScheme) bool { +// IsValid returns true iff the name matches the pattern of LabelNameRE when +// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if +// NameValidationScheme is set to UTF8Validation. 
+func (ln LabelName) IsValid() bool { if len(ln) == 0 { return false } - switch scheme { + switch NameValidationScheme { case LegacyValidation: return ln.IsValidLegacy() case UTF8Validation: return utf8.ValidString(string(ln)) default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %s", scheme)) + panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) } } @@ -135,11 +136,31 @@ func (ln LabelName) IsValidLegacy() bool { return true } -var ( - labelName LabelName - _ yaml.Unmarshaler = &labelName - _ json.Unmarshaler = &labelName -) +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + if !LabelName(s).IsValid() { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (ln *LabelName) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !LabelName(s).IsValid() { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} // LabelNames is a sortable LabelName slice. In implements sort.Interface. type LabelNames []LabelName diff --git a/vendor/github.com/prometheus/common/model/labels_globalvalidationscheme.go b/vendor/github.com/prometheus/common/model/labels_globalvalidationscheme.go deleted file mode 100644 index 0460a4d3206..00000000000 --- a/vendor/github.com/prometheus/common/model/labels_globalvalidationscheme.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2025 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !localvalidationscheme - -package model - -import ( - "encoding/json" - "fmt" -) - -// IsValid returns true iff the name matches the pattern of LabelNameRE when -// scheme is LegacyValidation, or valid UTF-8 if it is UTF8Validation. -func (ln LabelName) IsValid() bool { - return ln.isValid(NameValidationScheme) -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - if !LabelName(s).IsValid() { - return fmt.Errorf("%q is not a valid label name", s) - } - *ln = LabelName(s) - return nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (ln *LabelName) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - if !LabelName(s).IsValid() { - return fmt.Errorf("%q is not a valid label name", s) - } - *ln = LabelName(s) - return nil -} diff --git a/vendor/github.com/prometheus/common/model/labels_localvalidationscheme.go b/vendor/github.com/prometheus/common/model/labels_localvalidationscheme.go deleted file mode 100644 index 9b7e4aab6d8..00000000000 --- a/vendor/github.com/prometheus/common/model/labels_localvalidationscheme.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2025 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build localvalidationscheme - -package model - -import ( - "encoding/json" - "fmt" -) - -// IsValid returns true iff the name matches the pattern of LabelNameRE when -// scheme is LegacyValidation, or valid UTF-8 if it is UTF8Validation. -func (ln LabelName) IsValid(validationScheme ValidationScheme) bool { - return ln.isValid(validationScheme) -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -// Validation is done using UTF8Validation. -func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - if !LabelName(s).IsValid(UTF8Validation) { - return fmt.Errorf("%q is not a valid label name", s) - } - *ln = LabelName(s) - return nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -// Validation is done using UTF8Validation. -func (ln *LabelName) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - if !LabelName(s).IsValid(UTF8Validation) { - return fmt.Errorf("%q is not a valid label name", s) - } - *ln = LabelName(s) - return nil -} diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go index f14d2d8a34d..d0ad88da334 100644 --- a/vendor/github.com/prometheus/common/model/labelset.go +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -26,9 +26,11 @@ import ( // match. 
type LabelSet map[LabelName]LabelValue -func (ls LabelSet) validate(scheme ValidationScheme) error { +// Validate checks whether all names and values in the label set +// are valid. +func (ls LabelSet) Validate() error { for ln, lv := range ls { - if !ln.isValid(scheme) { + if !ln.IsValid() { return fmt.Errorf("invalid name %q", ln) } if !lv.IsValid() { @@ -137,7 +139,20 @@ func (ls LabelSet) FastFingerprint() Fingerprint { return labelSetToFastFingerprint(ls) } -var ( - labelSet LabelSet - _ json.Unmarshaler = &labelSet -) +// UnmarshalJSON implements the json.Unmarshaler interface. +func (l *LabelSet) UnmarshalJSON(b []byte) error { + var m map[LabelName]LabelValue + if err := json.Unmarshal(b, &m); err != nil { + return err + } + // encoding/json only unmarshals maps of the form map[string]T. It treats + // LabelName as a string and does not call its UnmarshalJSON method. + // Thus, we have to replicate the behavior here. + for ln := range m { + if !ln.IsValid() { + return fmt.Errorf("%q is not a valid label name", ln) + } + } + *l = LabelSet(m) + return nil +} diff --git a/vendor/github.com/prometheus/common/model/labelset_globalvalidationscheme.go b/vendor/github.com/prometheus/common/model/labelset_globalvalidationscheme.go deleted file mode 100644 index f978a0d62b3..00000000000 --- a/vendor/github.com/prometheus/common/model/labelset_globalvalidationscheme.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2025 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !localvalidationscheme - -package model - -import ( - "encoding/json" - "fmt" -) - -// Validate checks whether all names and values in the label set -// are valid. -func (ls LabelSet) Validate() error { - return ls.validate(NameValidationScheme) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (l *LabelSet) UnmarshalJSON(b []byte) error { - var m map[LabelName]LabelValue - if err := json.Unmarshal(b, &m); err != nil { - return err - } - // encoding/json only unmarshals maps of the form map[string]T. It treats - // LabelName as a string and does not call its UnmarshalJSON method. - // Thus, we have to replicate the behavior here. - for ln := range m { - if !ln.IsValid() { - return fmt.Errorf("%q is not a valid label name", ln) - } - } - *l = LabelSet(m) - return nil -} diff --git a/vendor/github.com/prometheus/common/model/labelset_localvalidationscheme.go b/vendor/github.com/prometheus/common/model/labelset_localvalidationscheme.go deleted file mode 100644 index 5c4c282e4c9..00000000000 --- a/vendor/github.com/prometheus/common/model/labelset_localvalidationscheme.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2025 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build localvalidationscheme - -package model - -import ( - "encoding/json" - "fmt" -) - -// Validate checks whether all names and values in the label set -// are valid. -func (ls LabelSet) Validate(scheme ValidationScheme) error { - return ls.validate(scheme) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -// Validates label names using UTF8Validation. -func (l *LabelSet) UnmarshalJSON(b []byte) error { - var m map[LabelName]LabelValue - if err := json.Unmarshal(b, &m); err != nil { - return err - } - // encoding/json only unmarshals maps of the form map[string]T. It treats - // LabelName as a string and does not call its UnmarshalJSON method. - // Thus, we have to replicate the behavior here. - for ln := range m { - if !ln.IsValid(UTF8Validation) { - return fmt.Errorf("%q is not a valid label name", ln) - } - } - *l = LabelSet(m) - return nil -} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index abeb31cb66c..2bd913fff21 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -27,13 +27,36 @@ import ( "gopkg.in/yaml.v2" ) -// NameEscapingScheme defines the default way that names will be escaped when -// presented to systems that do not support UTF-8 names. If the Content-Type -// "escaping" term is specified, that will override this value. -// NameEscapingScheme should not be set to the NoEscaping value. That string -// is used in content negotiation to indicate that a system supports UTF-8 and -// has that feature enabled. -var NameEscapingScheme = UnderscoreEscaping +var ( + // NameValidationScheme determines the global default method of the name + // validation to be used by all calls to IsValidMetricName() and LabelName + // IsValid(). + // + // Deprecated: This variable should not be used and might be removed in the + // far future. 
If you wish to stick to the legacy name validation use + // `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods + // instead. This variable is here as an escape hatch for emergency cases, + // given the recent change from `LegacyValidation` to `UTF8Validation`, e.g., + // to delay UTF-8 migrations in time or aid in debugging unforeseen results of + // the change. In such a case, a temporary assignment to `LegacyValidation` + // value in the `init()` function in your main.go or so, could be considered. + // + // Historically we opted for a global variable for feature gating different + // validation schemes in operations that were not otherwise easily adjustable + // (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate + // Labels structure or package might have been a better choice. Given the + // change was made and many upgraded the common already, we live this as-is + // with this warning and learning for the future. + NameValidationScheme = UTF8Validation + + // NameEscapingScheme defines the default way that names will be escaped when + // presented to systems that do not support UTF-8 names. If the Content-Type + // "escaping" term is specified, that will override this value. + // NameEscapingScheme should not be set to the NoEscaping value. That string + // is used in content negotiation to indicate that a system supports UTF-8 and + // has that feature enabled. + NameEscapingScheme = UnderscoreEscaping +) // ValidationScheme is a Go enum for determining how metric and label names will // be validated by this library. @@ -204,8 +227,11 @@ func (m Metric) FastFingerprint() Fingerprint { return LabelSet(m).FastFingerprint() } -func isValidMetricName(n LabelValue, scheme ValidationScheme) bool { - switch scheme { +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE +// for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is +// selected. 
+func IsValidMetricName(n LabelValue) bool { + switch NameValidationScheme { case LegacyValidation: return IsValidLegacyMetricName(string(n)) case UTF8Validation: @@ -214,12 +240,12 @@ func isValidMetricName(n LabelValue, scheme ValidationScheme) bool { } return utf8.ValidString(string(n)) default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %s", scheme)) + panic(fmt.Sprintf("Invalid name validation scheme requested: %s", NameValidationScheme.String())) } } // IsValidLegacyMetricName is similar to IsValidMetricName but always uses the -// legacy validation scheme. +// legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. func IsValidLegacyMetricName(n string) bool { diff --git a/vendor/github.com/prometheus/common/model/metric_globalvalidationscheme.go b/vendor/github.com/prometheus/common/model/metric_globalvalidationscheme.go deleted file mode 100644 index ff038d88c83..00000000000 --- a/vendor/github.com/prometheus/common/model/metric_globalvalidationscheme.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2025 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !localvalidationscheme - -package model - -// NameValidationScheme determines the global default method of the name -// validation to be used by all calls to IsValidMetricName() and LabelName -// IsValid(). 
-// -// Deprecated: This variable should not be used and might be removed in the -// far future. If you wish to stick to the legacy name validation use -// `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods -// instead. This variable is here as an escape hatch for emergency cases, -// given the recent change from `LegacyValidation` to `UTF8Validation`, e.g., -// to delay UTF-8 migrations in time or aid in debugging unforeseen results of -// the change. In such a case, a temporary assignment to `LegacyValidation` -// value in the `init()` function in your main.go or so, could be considered. -// -// Historically we opted for a global variable for feature gating different -// validation schemes in operations that were not otherwise easily adjustable -// (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate -// Labels structure or package might have been a better choice. Given the -// change was made and many upgraded the common already, we live this as-is -// with this warning and learning for the future. -var NameValidationScheme = UTF8Validation - -// IsValidMetricName returns true iff name matches the pattern of MetricNameRE -// for legacy names, and iff it's valid UTF-8 if scheme is UTF8Validation. -func IsValidMetricName(n LabelValue) bool { - return isValidMetricName(n, NameValidationScheme) -} diff --git a/vendor/github.com/prometheus/common/model/metric_localvalidationscheme.go b/vendor/github.com/prometheus/common/model/metric_localvalidationscheme.go deleted file mode 100644 index 1e7d0d74acb..00000000000 --- a/vendor/github.com/prometheus/common/model/metric_localvalidationscheme.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2025 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build localvalidationscheme - -package model - -// IsValidMetricName returns true iff name matches the pattern of MetricNameRE -// for legacy names, and iff it's valid UTF-8 if scheme is UTF8Validation. -func IsValidMetricName(n LabelValue, scheme ValidationScheme) bool { - return isValidMetricName(n, scheme) -} diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go index b5b2e21ac7b..8f91a9702e0 100644 --- a/vendor/github.com/prometheus/common/model/silence.go +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -45,8 +45,9 @@ func (m *Matcher) UnmarshalJSON(b []byte) error { return nil } -func (m *Matcher) validate(scheme ValidationScheme) error { - if !m.Name.isValid(scheme) { +// Validate returns true iff all fields of the matcher have valid values. +func (m *Matcher) Validate() error { + if !m.Name.IsValid() { return fmt.Errorf("invalid name %q", m.Name) } if m.IsRegex { @@ -75,12 +76,12 @@ type Silence struct { } // Validate returns true iff all fields of the silence have valid values. 
-func (s *Silence) validate(scheme ValidationScheme) error { +func (s *Silence) Validate() error { if len(s.Matchers) == 0 { return errors.New("at least one matcher required") } for _, m := range s.Matchers { - if err := m.validate(scheme); err != nil { + if err := m.Validate(); err != nil { return fmt.Errorf("invalid matcher: %w", err) } } diff --git a/vendor/github.com/prometheus/common/model/silence_globalvalidationscheme.go b/vendor/github.com/prometheus/common/model/silence_globalvalidationscheme.go deleted file mode 100644 index fd6c230860f..00000000000 --- a/vendor/github.com/prometheus/common/model/silence_globalvalidationscheme.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2025 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !localvalidationscheme - -package model - -// Validate returns true iff all fields of the matcher have valid values. -func (m *Matcher) Validate() error { - return m.validate(NameValidationScheme) -} - -// Validate returns true iff all fields of the silence have valid values. 
-func (s *Silence) Validate() error { - return s.validate(NameValidationScheme) -} diff --git a/vendor/github.com/prometheus/common/model/silence_localvalidationscheme.go b/vendor/github.com/prometheus/common/model/silence_localvalidationscheme.go deleted file mode 100644 index ed2889e48d0..00000000000 --- a/vendor/github.com/prometheus/common/model/silence_localvalidationscheme.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2025 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build localvalidationscheme - -package model - -// Validate returns true iff all fields of the matcher have valid values. -func (m *Matcher) Validate(scheme ValidationScheme) error { - return m.validate(scheme) -} - -// Validate returns true iff all fields of the silence have valid values. -func (s *Silence) Validate(scheme ValidationScheme) error { - return s.validate(scheme) -} diff --git a/vendor/github.com/prometheus/common/promslog/slog.go b/vendor/github.com/prometheus/common/promslog/slog.go index 02370f17561..8da43aef527 100644 --- a/vendor/github.com/prometheus/common/promslog/slog.go +++ b/vendor/github.com/prometheus/common/promslog/slog.go @@ -197,13 +197,6 @@ func newGoKitStyleReplaceAttrFunc(lvl *Level) func(groups []string, a slog.Attr) } default: } - - // Ensure time.Duration values are _always_ formatted as a Go - // duration string (ie, "1d2h3m"). 
- if v, ok := a.Value.Any().(time.Duration); ok { - a.Value = slog.StringValue(v.String()) - } - return a } } @@ -245,13 +238,6 @@ func defaultReplaceAttr(_ []string, a slog.Attr) slog.Attr { } default: } - - // Ensure time.Duration values are _always_ formatted as a Go duration - // string (ie, "1d2h3m"). - if v, ok := a.Value.Any().(time.Duration); ok { - a.Value = slog.StringValue(v.String()) - } - return a } diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index 7099ba325ab..12ca828ae8f 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -104,7 +104,7 @@ func Load(s string, logger *slog.Logger) (*Config, error) { } switch cfg.OTLPConfig.TranslationStrategy { - case UnderscoreEscapingWithSuffixes, UnderscoreEscapingWithoutSuffixes: + case UnderscoreEscapingWithSuffixes: case "": case NoTranslation, NoUTF8EscapingWithSuffixes: if cfg.GlobalConfig.MetricNameValidationScheme == model.LegacyValidation { @@ -1534,68 +1534,31 @@ func getGoGC() int { type translationStrategyOption string var ( - // NoUTF8EscapingWithSuffixes will accept metric/label names as they are. Unit - // and type suffixes may be added to metric names, according to certain rules. + // NoUTF8EscapingWithSuffixes will accept metric/label names as they are. + // Unit and type suffixes may be added to metric names, according to certain rules. NoUTF8EscapingWithSuffixes translationStrategyOption = "NoUTF8EscapingWithSuffixes" - // UnderscoreEscapingWithSuffixes is the default option for translating OTLP - // to Prometheus. This option will translate metric name characters that are - // not alphanumerics/underscores/colons to underscores, and label name - // characters that are not alphanumerics/underscores to underscores. Unit and - // type suffixes may be appended to metric names, according to certain rules. 
+ // UnderscoreEscapingWithSuffixes is the default option for translating OTLP to Prometheus. + // This option will translate metric name characters that are not alphanumerics/underscores/colons to underscores, + // and label name characters that are not alphanumerics/underscores to underscores. + // Unit and type suffixes may be appended to metric names, according to certain rules. UnderscoreEscapingWithSuffixes translationStrategyOption = "UnderscoreEscapingWithSuffixes" - // UnderscoreEscapingWithoutSuffixes translates metric name characters that - // are not alphanumerics/underscores/colons to underscores, and label name - // characters that are not alphanumerics/underscores to underscores, but - // unlike UnderscoreEscapingWithSuffixes it does not append any suffixes to - // the names. - UnderscoreEscapingWithoutSuffixes translationStrategyOption = "UnderscoreEscapingWithoutSuffixes" // NoTranslation (EXPERIMENTAL): disables all translation of incoming metric - // and label names. This offers a way for the OTLP users to use native metric - // names, reducing confusion. + // and label names. This offers a way for the OTLP users to use native metric names, reducing confusion. // // WARNING: This setting has significant known risks and limitations (see - // https://prometheus.io/docs/practices/naming/ for details): * Impaired UX - // when using PromQL in plain YAML (e.g. alerts, rules, dashboard, autoscaling - // configuration). * Series collisions which in the best case may result in - // OOO errors, in the worst case a silently malformed time series. For - // instance, you may end up in situation of ingesting `foo.bar` series with - // unit `seconds` and a separate series `foo.bar` with unit `milliseconds`. + // https://prometheus.io/docs/practices/naming/ for details): + // * Impaired UX when using PromQL in plain YAML (e.g. alerts, rules, dashboard, autoscaling configuration). 
+ // * Series collisions which in the best case may result in OOO errors, in the worst case a silently malformed + // time series. For instance, you may end up in situation of ingesting `foo.bar` series with unit + // `seconds` and a separate series `foo.bar` with unit `milliseconds`. // - // As a result, this setting is experimental and currently, should not be used - // in production systems. + // As a result, this setting is experimental and currently, should not be used in + // production systems. // - // TODO(ArthurSens): Mention `type-and-unit-labels` feature - // (https://github.com/prometheus/proposals/pull/39) once released, as - // potential mitigation of the above risks. + // TODO(ArthurSens): Mention `type-and-unit-labels` feature (https://github.com/prometheus/proposals/pull/39) once released, as potential mitigation of the above risks. NoTranslation translationStrategyOption = "NoTranslation" ) -// ShouldEscape returns true if the translation strategy requires that metric -// names be escaped. -func (o translationStrategyOption) ShouldEscape() bool { - switch o { - case UnderscoreEscapingWithSuffixes, UnderscoreEscapingWithoutSuffixes: - return true - case NoTranslation, NoUTF8EscapingWithSuffixes: - return false - default: - return false - } -} - -// ShouldAddSuffixes returns a bool deciding whether the given translation -// strategy should have suffixes added. -func (o translationStrategyOption) ShouldAddSuffixes() bool { - switch o { - case UnderscoreEscapingWithSuffixes, NoUTF8EscapingWithSuffixes: - return true - case UnderscoreEscapingWithoutSuffixes, NoTranslation: - return false - default: - return false - } -} - // OTLPConfig is the configuration for writing to the OTLP endpoint. 
type OTLPConfig struct { PromoteAllResourceAttributes bool `yaml:"promote_all_resource_attributes,omitempty"` diff --git a/vendor/github.com/prometheus/prometheus/discovery/manager.go b/vendor/github.com/prometheus/prometheus/discovery/manager.go index 51a46ca2317..24950d9d59b 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/manager.go +++ b/vendor/github.com/prometheus/prometheus/discovery/manager.go @@ -365,10 +365,8 @@ func (m *Manager) updater(ctx context.Context, p *Provider, updates chan []*targ func (m *Manager) sender() { ticker := time.NewTicker(m.updatert) - defer func() { - ticker.Stop() - close(m.syncCh) - }() + defer ticker.Stop() + for { select { case <-m.ctx.Done(): @@ -510,3 +508,19 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int { } return failed } + +// StaticProvider holds a list of target groups that never change. +type StaticProvider struct { + TargetGroups []*targetgroup.Group +} + +// Run implements the Worker interface. +func (sd *StaticProvider) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + // We still have to consider that the consumer exits right away in which case + // the context will be canceled. 
+ select { + case ch <- sd.TargetGroups: + case <-ctx.Done(): + } + close(ch) +} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_slicelabels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels.go similarity index 100% rename from vendor/github.com/prometheus/prometheus/model/labels/labels_slicelabels.go rename to vendor/github.com/prometheus/prometheus/model/labels/labels.go diff --git a/vendor/github.com/prometheus/prometheus/model/labels/validate.go b/vendor/github.com/prometheus/prometheus/model/labels/validate.go new file mode 100644 index 00000000000..db56ed47eaa --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/labels/validate.go @@ -0,0 +1,32 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package labels + +import "github.com/prometheus/common/model" + +// IsValidMetricName returns whether name is a valid metric name, depending on the validation scheme. +func IsValidMetricName(name string, scheme model.ValidationScheme) bool { + if scheme == model.LegacyValidation { + return model.IsValidLegacyMetricName(name) + } + return model.IsValidMetricName(model.LabelValue(name)) +} + +// IsValidLabelName returns whether name is a valid label name, depending on the validation scheme. 
+func IsValidLabelName(name string, scheme model.ValidationScheme) bool { + if scheme == model.LegacyValidation { + return model.LabelName(name).IsValidLegacy() + } + return model.LabelName(name).IsValid() +} diff --git a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go index 70daef426f5..6d7077d0b77 100644 --- a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go +++ b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go @@ -100,6 +100,8 @@ type Config struct { Replacement string `yaml:"replacement,omitempty" json:"replacement,omitempty"` // Action is the action to be performed for the relabeling. Action Action `yaml:"action,omitempty" json:"action,omitempty"` + // MetricNameValidationScheme to use when validating labels. + MetricNameValidationScheme model.ValidationScheme `yaml:"-" json:"-"` } // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -112,6 +114,9 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { if c.Regex.Regexp == nil { c.Regex = MustNewRegexp("") } + if c.MetricNameValidationScheme == model.UnsetValidation { + c.MetricNameValidationScheme = model.UTF8Validation + } return c.Validate() } @@ -125,7 +130,12 @@ func (c *Config) Validate() error { if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.TargetLabel == "" { return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action) } - if c.Action == Replace && !varInRegexTemplate(c.TargetLabel) && !model.LabelName(c.TargetLabel).IsValid() { + + if c.MetricNameValidationScheme == model.UnsetValidation { + return errors.New("MetricNameValidationScheme must be set in relabel configuration") + } + + if c.Action == Replace && !varInRegexTemplate(c.TargetLabel) && !labels.IsValidLabelName(c.TargetLabel, c.MetricNameValidationScheme) { 
return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) } @@ -133,12 +143,12 @@ func (c *Config) Validate() error { // UTF-8 allows ${} characters, so standard validation allow $variables by default. // TODO(bwplotka): Relabelling users cannot put $ and ${<...>} characters in metric names or values. // Design escaping mechanism to allow that, once valid use case appears. - return model.LabelName(value).IsValid() + return labels.IsValidLabelName(value, c.MetricNameValidationScheme) } if c.Action == Replace && varInRegexTemplate(c.TargetLabel) && !isValidLabelNameWithRegexVarFn(c.TargetLabel) { return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) } - if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && !model.LabelName(c.TargetLabel).IsValid() { + if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && !labels.IsValidLabelName(c.TargetLabel, c.MetricNameValidationScheme) { return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) } if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.Replacement != DefaultRelabelConfig.Replacement { @@ -147,7 +157,7 @@ func (c *Config) Validate() error { if c.Action == LabelMap && !isValidLabelNameWithRegexVarFn(c.Replacement) { return fmt.Errorf("%q is invalid 'replacement' for %s action", c.Replacement, c.Action) } - if c.Action == HashMod && !model.LabelName(c.TargetLabel).IsValid() { + if c.Action == HashMod && !labels.IsValidLabelName(c.TargetLabel, c.MetricNameValidationScheme) { return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) } @@ -318,16 +328,16 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { if indexes == nil { break } - target := model.LabelName(cfg.Regex.ExpandString([]byte{}, cfg.TargetLabel, val, indexes)) - if 
!target.IsValid() { + target := string(cfg.Regex.ExpandString([]byte{}, cfg.TargetLabel, val, indexes)) + if !labels.IsValidLabelName(target, cfg.MetricNameValidationScheme) { break } res := cfg.Regex.ExpandString([]byte{}, cfg.Replacement, val, indexes) if len(res) == 0 { - lb.Del(string(target)) + lb.Del(target) break } - lb.Set(string(target), string(res)) + lb.Set(target, string(res)) case Lowercase: lb.Set(cfg.TargetLabel, strings.ToLower(val)) case Uppercase: diff --git a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go index 96b70cc66b0..46cae2d608f 100644 --- a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go +++ b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go @@ -26,6 +26,7 @@ import ( "github.com/prometheus/common/model" "gopkg.in/yaml.v3" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" @@ -96,7 +97,7 @@ type ruleGroups struct { } // Validate validates all rules in the rule groups. 
-func (g *RuleGroups) Validate(node ruleGroups) (errs []error) { +func (g *RuleGroups) Validate(node ruleGroups, validationScheme model.ValidationScheme) (errs []error) { set := map[string]struct{}{} for j, g := range g.Groups { @@ -112,7 +113,7 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) { } for k, v := range g.Labels { - if !model.LabelName(k).IsValid() || k == model.MetricNameLabel { + if !labels.IsValidLabelName(k, validationScheme) || k == model.MetricNameLabel { errs = append( errs, fmt.Errorf("invalid label name: %s", k), ) @@ -128,7 +129,7 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) { set[g.Name] = struct{}{} for i, r := range g.Rules { - for _, node := range r.Validate(node.Groups[j].Rules[i]) { + for _, node := range r.Validate(node.Groups[j].Rules[i], validationScheme) { var ruleName string if r.Alert != "" { ruleName = r.Alert @@ -198,7 +199,7 @@ type RuleNode struct { } // Validate the rule and return a list of encountered errors. -func (r *Rule) Validate(node RuleNode) (nodes []WrappedError) { +func (r *Rule) Validate(node RuleNode, validationScheme model.ValidationScheme) (nodes []WrappedError) { if r.Record != "" && r.Alert != "" { nodes = append(nodes, WrappedError{ err: errors.New("only one of 'record' and 'alert' must be set"), @@ -244,7 +245,7 @@ func (r *Rule) Validate(node RuleNode) (nodes []WrappedError) { node: &node.Record, }) } - if !model.IsValidMetricName(model.LabelValue(r.Record)) { + if !labels.IsValidMetricName(r.Record, validationScheme) { nodes = append(nodes, WrappedError{ err: fmt.Errorf("invalid recording rule name: %s", r.Record), node: &node.Record, @@ -261,7 +262,7 @@ func (r *Rule) Validate(node RuleNode) (nodes []WrappedError) { } for k, v := range r.Labels { - if !model.LabelName(k).IsValid() || k == model.MetricNameLabel { + if !labels.IsValidLabelName(k, validationScheme) || k == model.MetricNameLabel { nodes = append(nodes, WrappedError{ err: fmt.Errorf("invalid label name: %s", 
k), }) @@ -275,7 +276,7 @@ func (r *Rule) Validate(node RuleNode) (nodes []WrappedError) { } for k := range r.Annotations { - if !model.LabelName(k).IsValid() { + if !labels.IsValidLabelName(k, validationScheme) { nodes = append(nodes, WrappedError{ err: fmt.Errorf("invalid annotation name: %s", k), }) @@ -338,8 +339,29 @@ func testTemplateParsing(rl *Rule) (errs []error) { return errs } +type parseArgs struct { + validationScheme model.ValidationScheme +} + +type ParseOption func(*parseArgs) + +// WithValidationScheme returns a ParseOption setting the metric/label name validation scheme. +func WithValidationScheme(scheme model.ValidationScheme) ParseOption { + return func(args *parseArgs) { + args.validationScheme = scheme + } +} + // Parse parses and validates a set of rules. -func Parse(content []byte, ignoreUnknownFields bool) (*RuleGroups, []error) { +// The default metric/label name validation scheme is model.UTF8Validation. +func Parse(content []byte, ignoreUnknownFields bool, opts ...ParseOption) (*RuleGroups, []error) { + args := &parseArgs{ + validationScheme: model.UTF8Validation, + } + for _, opt := range opts { + opt(args) + } + var ( groups RuleGroups node ruleGroups @@ -364,16 +386,16 @@ func Parse(content []byte, ignoreUnknownFields bool) (*RuleGroups, []error) { return nil, errs } - return &groups, groups.Validate(node) + return &groups, groups.Validate(node, args.validationScheme) } // ParseFile reads and parses rules from a file. -func ParseFile(file string, ignoreUnknownFields bool) (*RuleGroups, []error) { +func ParseFile(file string, ignoreUnknownFields bool, opts ...ParseOption) (*RuleGroups, []error) { b, err := os.ReadFile(file) if err != nil { return nil, []error{fmt.Errorf("%s: %w", file, err)} } - rgs, errs := Parse(b, ignoreUnknownFields) + rgs, errs := Parse(b, ignoreUnknownFields, opts...) 
for i := range errs { errs[i] = fmt.Errorf("%s: %w", file, errs[i]) } diff --git a/vendor/github.com/prometheus/prometheus/notifier/manager.go b/vendor/github.com/prometheus/prometheus/notifier/manager.go index c9463b24a8d..043bbf4d108 100644 --- a/vendor/github.com/prometheus/prometheus/notifier/manager.go +++ b/vendor/github.com/prometheus/prometheus/notifier/manager.go @@ -26,6 +26,7 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" @@ -254,10 +255,7 @@ func (n *Manager) targetUpdateLoop(tsets <-chan map[string][]*targetgroup.Group) select { case <-n.stopRequested: return - case ts, ok := <-tsets: - if !ok { - break - } + case ts := <-tsets: n.reload(ts) } } @@ -311,6 +309,11 @@ func (n *Manager) Send(alerts ...*Alert) { n.mtx.Lock() defer n.mtx.Unlock() + for i, rc := range n.opts.RelabelConfigs { + if rc.MetricNameValidationScheme == model.UnsetValidation { + n.opts.RelabelConfigs[i].MetricNameValidationScheme = model.UTF8Validation + } + } alerts = relabelAlerts(n.opts.RelabelConfigs, n.opts.ExternalLabels, alerts) if len(alerts) == 0 { return diff --git a/vendor/github.com/prometheus/prometheus/promql/durations.go b/vendor/github.com/prometheus/prometheus/promql/durations.go index c882adfbb63..20fa095d531 100644 --- a/vendor/github.com/prometheus/prometheus/promql/durations.go +++ b/vendor/github.com/prometheus/prometheus/promql/durations.go @@ -21,20 +21,11 @@ import ( "github.com/prometheus/prometheus/promql/parser" ) -// durationVisitor is a visitor that calculates the actual value of -// duration expressions in AST nodes. For example the query -// "http_requests_total offset (1h / 2)" is represented in the AST -// as a VectorSelector with OriginalOffset 0 and the duration expression -// in OriginalOffsetExpr representing (1h / 2). This visitor evaluates -// such duration expression, setting OriginalOffset to 30m. 
+// durationVisitor is a visitor that visits a duration expression and calculates the duration. type durationVisitor struct { step time.Duration } -// Visit finds any duration expressions in AST Nodes and modifies the Node to -// store the concrete value. Note that parser.Walk does NOT traverse the -// duration expressions such as OriginalOffsetExpr so we make our own recursive -// call on those to evaluate the result. func (v *durationVisitor) Visit(node parser.Node, _ []parser.Node) (parser.Visitor, error) { switch n := node.(type) { case *parser.VectorSelector: @@ -79,8 +70,7 @@ func (v *durationVisitor) Visit(node parser.Node, _ []parser.Node) (parser.Visit return v, nil } -// calculateDuration returns the float value of a duration expression as -// time.Duration or an error if the duration is invalid. +// calculateDuration computes the duration from a duration expression. func (v *durationVisitor) calculateDuration(expr parser.Expr, allowedNegative bool) (time.Duration, error) { duration, err := v.evaluateDurationExpr(expr) if err != nil { diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index 3cdf299dffc..f8289111881 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -151,14 +151,17 @@ type PrometheusQueryOpts struct { enablePerStepStats bool // Lookback delta duration for this query. lookbackDelta time.Duration + // validationScheme for metric/label names. 
+ validationScheme model.ValidationScheme } var _ QueryOpts = &PrometheusQueryOpts{} -func NewPrometheusQueryOpts(enablePerStepStats bool, lookbackDelta time.Duration) QueryOpts { +func NewPrometheusQueryOpts(enablePerStepStats bool, lookbackDelta time.Duration, validationScheme model.ValidationScheme) QueryOpts { return &PrometheusQueryOpts{ enablePerStepStats: enablePerStepStats, lookbackDelta: lookbackDelta, + validationScheme: validationScheme, } } @@ -170,11 +173,17 @@ func (p *PrometheusQueryOpts) LookbackDelta() time.Duration { return p.lookbackDelta } +func (p *PrometheusQueryOpts) ValidationScheme() model.ValidationScheme { + return p.validationScheme +} + type QueryOpts interface { // Enables recording per-step statistics if the engine has it enabled as well. Disabled by default. EnablePerStepStats() bool // Lookback delta duration for this query. LookbackDelta() time.Duration + // ValidationScheme to use for metric and label names. + ValidationScheme() model.ValidationScheme } // query implements the Query interface. @@ -193,6 +202,8 @@ type query struct { matrix Matrix // Cancellation function for the query. cancel func() + // validationScheme used for metric and label names. + validationScheme model.ValidationScheme // The engine against which the query is executed. 
ng *Engine @@ -520,7 +531,7 @@ func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts Q func (ng *Engine) newQuery(q storage.Queryable, qs string, opts QueryOpts, start, end time.Time, interval time.Duration) (*parser.Expr, *query) { if opts == nil { - opts = NewPrometheusQueryOpts(false, 0) + opts = NewPrometheusQueryOpts(false, 0, model.UTF8Validation) } lookbackDelta := opts.LookbackDelta() @@ -535,12 +546,13 @@ func (ng *Engine) newQuery(q storage.Queryable, qs string, opts QueryOpts, start LookbackDelta: lookbackDelta, } qry := &query{ - q: qs, - stmt: es, - ng: ng, - stats: stats.NewQueryTimers(), - sampleStats: stats.NewQuerySamples(ng.enablePerStepStats && opts.EnablePerStepStats()), - queryable: q, + q: qs, + stmt: es, + ng: ng, + stats: stats.NewQueryTimers(), + sampleStats: stats.NewQuerySamples(ng.enablePerStepStats && opts.EnablePerStepStats()), + queryable: q, + validationScheme: opts.ValidationScheme(), } return &es.Expr, qry } @@ -745,6 +757,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval enableDelayedNameRemoval: ng.enableDelayedNameRemoval, enableTypeAndUnitLabels: ng.enableTypeAndUnitLabels, querier: querier, + validationScheme: query.validationScheme, } query.sampleStats.InitStepTracking(start, start, 1) @@ -805,6 +818,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval enableDelayedNameRemoval: ng.enableDelayedNameRemoval, enableTypeAndUnitLabels: ng.enableTypeAndUnitLabels, querier: querier, + validationScheme: query.validationScheme, } query.sampleStats.InitStepTracking(evaluator.startTimestamp, evaluator.endTimestamp, evaluator.interval) val, warnings, err := evaluator.Eval(ctxInnerEval, s.Expr) @@ -1079,6 +1093,7 @@ type evaluator struct { enableDelayedNameRemoval bool enableTypeAndUnitLabels bool querier storage.Querier + validationScheme model.ValidationScheme } // errorf causes a panic with the input formatted into an error. 
@@ -1228,7 +1243,7 @@ func (enh *EvalNodeHelper) resetHistograms(inVec Vector, arg parser.Expr) annota // function call results. // The prepSeries function (if provided) can be used to prepare the helper // for each series, then passed to each call funcCall. -func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]Vector, Matrix, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, annotations.Annotations), exprs ...parser.Expr) (Matrix, annotations.Annotations) { +func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, annotations.Annotations), exprs ...parser.Expr) (Matrix, annotations.Annotations) { numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 matrixes := make([]Matrix, len(exprs)) origMatrixes := make([]Matrix, len(exprs)) @@ -1250,7 +1265,8 @@ func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Label } } - vectors := make([]Vector, len(exprs)) // Input vectors for the function. + vectors := make([]Vector, len(exprs)) // Input vectors for the function. + args := make([]parser.Value, len(exprs)) // Argument to function. // Create an output vector that is as big as the input matrix with // the most time series. biggestLen := 1 @@ -1304,6 +1320,7 @@ func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Label sh = seriesHelpers[i] } vectors[i], bh = ev.gatherVector(ts, matrixes[i], vectors[i], bh, sh) + args[i] = vectors[i] if prepSeries != nil { bufHelpers[i] = bh } @@ -1311,7 +1328,7 @@ func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Label // Make the function call. enh.Ts = ts - result, ws := funcCall(vectors, nil, bufHelpers, enh) + result, ws := funcCall(args, bufHelpers, enh) enh.Out = result[:0] // Reuse result vector. 
warnings.Merge(ws) @@ -1676,15 +1693,15 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, if e.Op == parser.COUNT_VALUES { valueLabel := param.(*parser.StringLiteral) - if !model.LabelName(valueLabel.Val).IsValid() { + if !labels.IsValidLabelName(valueLabel.Val, ev.validationScheme) { ev.errorf("invalid label name %s", valueLabel) } if !e.Without { sortedGrouping = append(sortedGrouping, valueLabel.Val) slices.Sort(sortedGrouping) } - return ev.rangeEval(ctx, nil, func(v []Vector, _ Matrix, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return ev.aggregationCountValues(e, sortedGrouping, valueLabel.Val, v[0], enh) + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.aggregationCountValues(e, sortedGrouping, valueLabel.Val, v[0].(Vector), enh) }, e.Expr) } @@ -1764,18 +1781,22 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, if !matrixArg { // Does not have a matrix argument. - return ev.rangeEval(ctx, nil, func(v []Vector, _ Matrix, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec, annos := call(v, nil, e.Args, enh) + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec, annos := call(v, e.Args, enh) return vec, warnings.Merge(annos) }, e.Args...) } + inArgs := make([]parser.Value, len(e.Args)) // Evaluate any non-matrix arguments. 
- evalVals := make([]Matrix, len(e.Args)) + otherArgs := make([]Matrix, len(e.Args)) + otherInArgs := make([]Vector, len(e.Args)) for i, e := range e.Args { if i != matrixArgIndex { val, ws := ev.eval(ctx, e) - evalVals[i] = val.(Matrix) + otherArgs[i] = val.(Matrix) + otherInArgs[i] = Vector{Sample{}} + inArgs[i] = otherInArgs[i] warnings.Merge(ws) } } @@ -1803,6 +1824,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, var histograms []HPoint var prevSS *Series inMatrix := make(Matrix, 1) + inArgs[matrixArgIndex] = inMatrix enh := &EvalNodeHelper{Out: make(Vector, 0, 1), enableDelayedNameRemoval: ev.enableDelayedNameRemoval} // Process all the calls for one time series at a time. it := storage.NewBuffer(selRange) @@ -1813,7 +1835,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, // vector functions, the only change needed is to drop the // metric name in the output. dropName := e.Func.Name != "last_over_time" - vectorVals := make([]Vector, len(e.Args)-1) + for i, s := range selVS.Series { if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) @@ -1841,11 +1863,9 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, // Set the non-matrix arguments. // They are scalar, so it is safe to use the step number // when looking up the argument, as there will be no gaps. - counter := 0 for j := range e.Args { if j != matrixArgIndex { - vectorVals[counter] = Vector{Sample{F: evalVals[j][0].Floats[step].F}} - counter++ + otherInArgs[j][0].F = otherArgs[j][0].Floats[step].F } } // Evaluate the matrix selector for this series @@ -1862,9 +1882,8 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, inMatrix[0].Floats = floats inMatrix[0].Histograms = histograms enh.Ts = ts - // Make the function call. 
- outVec, annos := call(vectorVals, inMatrix, e.Args, enh) + outVec, annos := call(inArgs, e.Args, enh) warnings.Merge(annos) ev.samplesStats.IncrementSamplesAtStep(step, int64(len(floats)+totalHPointSize(histograms))) @@ -1904,7 +1923,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, if ev.enableTypeAndUnitLabels { // When type-and-unit-labels feature is enabled, check __type__ label typeLabel := inMatrix[0].Metric.Get("__type__") - if typeLabel != string(model.MetricTypeCounter) && typeLabel != string(model.MetricTypeHistogram) { + if typeLabel != string(model.MetricTypeCounter) { warnings.Add(annotations.NewPossibleNonCounterLabelInfo(metricName, typeLabel, e.Args[0].PositionRange())) } } else if !strings.HasSuffix(metricName, "_total") && @@ -1998,8 +2017,8 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, case *parser.BinaryExpr: switch lt, rt := e.LHS.Type(), e.RHS.Type(); { case lt == parser.ValueTypeScalar && rt == parser.ValueTypeScalar: - return ev.rangeEval(ctx, nil, func(v []Vector, _ Matrix, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - val := scalarBinop(e.Op, v[0][0].F, v[1][0].F) + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + val := scalarBinop(e.Op, v[0].(Vector)[0].F, v[1].(Vector)[0].F) return append(enh.Out, Sample{F: val}), nil }, e.LHS, e.RHS) case lt == parser.ValueTypeVector && rt == parser.ValueTypeVector: @@ -2011,40 +2030,40 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, } switch e.Op { case parser.LAND: - return ev.rangeEval(ctx, initSignatures, func(v []Vector, _ Matrix, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return ev.VectorAnd(v[0], v[1], e.VectorMatching, sh[0], sh[1], enh), nil + return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh 
[][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) case parser.LOR: - return ev.rangeEval(ctx, initSignatures, func(v []Vector, _ Matrix, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return ev.VectorOr(v[0], v[1], e.VectorMatching, sh[0], sh[1], enh), nil + return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) case parser.LUNLESS: - return ev.rangeEval(ctx, initSignatures, func(v []Vector, _ Matrix, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return ev.VectorUnless(v[0], v[1], e.VectorMatching, sh[0], sh[1], enh), nil + return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) default: - return ev.rangeEval(ctx, initSignatures, func(v []Vector, _ Matrix, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec, err := ev.VectorBinop(e.Op, v[0], v[1], e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh, e.PositionRange()) + return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec, err := ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh, e.PositionRange()) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar: - return ev.rangeEval(ctx, nil, func(v []Vector, _ Matrix, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) 
(Vector, annotations.Annotations) { - vec, err := ev.VectorscalarBinop(e.Op, v[0], Scalar{V: v[1][0].F}, false, e.ReturnBool, enh, e.PositionRange()) + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec, err := ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh, e.PositionRange()) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector: - return ev.rangeEval(ctx, nil, func(v []Vector, _ Matrix, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec, err := ev.VectorscalarBinop(e.Op, v[1], Scalar{V: v[0][0].F}, true, e.ReturnBool, enh, e.PositionRange()) + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec, err := ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh, e.PositionRange()) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } case *parser.NumberLiteral: span.SetAttributes(attribute.Float64("value", e.Val)) - return ev.rangeEval(ctx, nil, func(_ []Vector, _ Matrix, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(_ []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{F: e.Val, Metric: labels.EmptyLabels()}), nil }) @@ -2080,6 +2099,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, enableDelayedNameRemoval: ev.enableDelayedNameRemoval, enableTypeAndUnitLabels: ev.enableTypeAndUnitLabels, querier: ev.querier, + validationScheme: ev.validationScheme, } if e.Step != 0 { @@ -2126,6 +2146,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, enableDelayedNameRemoval: 
ev.enableDelayedNameRemoval, enableTypeAndUnitLabels: ev.enableTypeAndUnitLabels, querier: ev.querier, + validationScheme: ev.validationScheme, } res, ws := newEv.eval(ctx, e.Expr) ev.currentSamples = newEv.currentSamples @@ -2215,7 +2236,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Co seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)-1) } - return ev.rangeEval(ctx, nil, func(_ []Vector, _ Matrix, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(_ []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if vs.Timestamp != nil { // This is a special case for "timestamp()" when the @ modifier is used, to ensure that // we return a point for each time step in this case. @@ -2244,7 +2265,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Co } } ev.samplesStats.UpdatePeak(ev.currentSamples) - vec, annos := call([]Vector{vec}, nil, e.Args, enh) + vec, annos := call([]parser.Value{vec}, e.Args, enh) return vec, ws.Merge(annos) }) } diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index d9839c5a054..d22329139dc 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -56,10 +56,10 @@ import ( // metrics, the timestamp are not needed. // // Scalar results should be returned as the value of a sample in a Vector. 
-type FunctionCall func(vectorVals []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) +type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) // === time() float64 === -func funcTime(_ []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcTime(_ []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return Vector{Sample{ F: float64(enh.Ts) / 1000, }}, nil @@ -69,11 +69,11 @@ func funcTime(_ []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) ( // It calculates the rate (allowing for counter resets if isCounter is true), // extrapolates if the first/last sample is close to the boundary, and returns // the result as either per-second (if isRate is true) or overall. -func extrapolatedRate(vals Matrix, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) (Vector, annotations.Annotations) { +func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) (Vector, annotations.Annotations) { ms := args[0].(*parser.MatrixSelector) vs := ms.VectorSelector.(*parser.VectorSelector) var ( - samples = vals[0] + samples = vals[0].(Matrix)[0] rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset) rangeEnd = enh.Ts - durationMilliseconds(vs.Offset) resultFloat float64 @@ -144,37 +144,32 @@ func extrapolatedRate(vals Matrix, args parser.Expressions, enh *EvalNodeHelper, // (which is our guess for where the series actually starts or ends). extrapolationThreshold := averageDurationBetweenSamples * 1.1 + extrapolateToInterval := sampledInterval + if durationToStart >= extrapolationThreshold { durationToStart = averageDurationBetweenSamples / 2 } - if isCounter { + if isCounter && resultFloat > 0 && len(samples.Floats) > 0 && samples.Floats[0].F >= 0 { // Counters cannot be negative. 
If we have any slope at all // (i.e. resultFloat went up), we can extrapolate the zero point // of the counter. If the duration to the zero point is shorter // than the durationToStart, we take the zero point as the start // of the series, thereby avoiding extrapolation to negative // counter values. - durationToZero := durationToStart - if resultFloat > 0 && - len(samples.Floats) > 0 && - samples.Floats[0].F >= 0 { - durationToZero = sampledInterval * (samples.Floats[0].F / resultFloat) - } else if resultHistogram != nil && - resultHistogram.Count > 0 && - len(samples.Histograms) > 0 && - samples.Histograms[0].H.Count >= 0 { - durationToZero = sampledInterval * (samples.Histograms[0].H.Count / resultHistogram.Count) - } + // TODO(beorn7): Do this for histograms, too. + durationToZero := sampledInterval * (samples.Floats[0].F / resultFloat) if durationToZero < durationToStart { durationToStart = durationToZero } } + extrapolateToInterval += durationToStart if durationToEnd >= extrapolationThreshold { durationToEnd = averageDurationBetweenSamples / 2 } + extrapolateToInterval += durationToEnd - factor := (sampledInterval + durationToStart + durationToEnd) / sampledInterval + factor := extrapolateToInterval / sampledInterval if isRate { factor /= ms.Range.Seconds() } @@ -288,33 +283,33 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra } // === delta(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcDelta(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return extrapolatedRate(matrixVals, args, enh, false, false) +func funcDelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return extrapolatedRate(vals, args, enh, false, false) } // === rate(node parser.ValueTypeMatrix) (Vector, Annotations) === -func funcRate(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, 
annotations.Annotations) { - return extrapolatedRate(matrixVals, args, enh, true, true) +func funcRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return extrapolatedRate(vals, args, enh, true, true) } // === increase(node parser.ValueTypeMatrix) (Vector, Annotations) === -func funcIncrease(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return extrapolatedRate(matrixVals, args, enh, true, false) +func funcIncrease(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return extrapolatedRate(vals, args, enh, true, false) } // === irate(node parser.ValueTypeMatrix) (Vector, Annotations) === -func funcIrate(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return instantValue(matrixVals, args, enh.Out, true) +func funcIrate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return instantValue(vals, args, enh.Out, true) } // === idelta(node model.ValMatrix) (Vector, Annotations) === -func funcIdelta(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return instantValue(matrixVals, args, enh.Out, false) +func funcIdelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return instantValue(vals, args, enh.Out, false) } -func instantValue(vals Matrix, args parser.Expressions, out Vector, isRate bool) (Vector, annotations.Annotations) { +func instantValue(vals []parser.Value, args parser.Expressions, out Vector, isRate bool) (Vector, annotations.Annotations) { var ( - samples = vals[0] + samples = vals[0].(Matrix)[0] metricName = samples.Metric.Get(labels.MetricName) ss = make([]Sample, 0, 2) annos annotations.Annotations @@ -441,14 +436,14 @@ func 
calcTrendValue(i int, tf, s0, s1, b float64) float64 { // affects how trends in historical data will affect the current data. A higher // trend factor increases the influence. of trends. Algorithm taken from // https://en.wikipedia.org/wiki/Exponential_smoothing . -func funcDoubleExponentialSmoothing(vectorVals []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - samples := matrixVal[0] +func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + samples := vals[0].(Matrix)[0] metricName := samples.Metric.Get(labels.MetricName) // The smoothing factor argument. - sf := vectorVals[0][0].F + sf := vals[1].(Vector)[0].F // The trend factor argument. - tf := vectorVals[1][0].F + tf := vals[2].(Vector)[0].F // Check that the input parameters are valid. if sf <= 0 || sf >= 1 { @@ -504,27 +499,27 @@ func filterFloats(v Vector) Vector { } // === sort(node parser.ValueTypeVector) (Vector, Annotations) === -func funcSort(vectorVals []Vector, _ Matrix, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSort(vals []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { // NaN should sort to the bottom, so take descending sort with NaN first and // reverse it. 
- byValueSorter := vectorByReverseValueHeap(filterFloats(vectorVals[0])) + byValueSorter := vectorByReverseValueHeap(filterFloats(vals[0].(Vector))) sort.Sort(sort.Reverse(byValueSorter)) return Vector(byValueSorter), nil } // === sortDesc(node parser.ValueTypeVector) (Vector, Annotations) === -func funcSortDesc(vectorVals []Vector, _ Matrix, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSortDesc(vals []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { // NaN should sort to the bottom, so take ascending sort with NaN first and // reverse it. - byValueSorter := vectorByValueHeap(filterFloats(vectorVals[0])) + byValueSorter := vectorByValueHeap(filterFloats(vals[0].(Vector))) sort.Sort(sort.Reverse(byValueSorter)) return Vector(byValueSorter), nil } // === sort_by_label(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) === -func funcSortByLabel(vectorVals []Vector, _ Matrix, args parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSortByLabel(vals []parser.Value, args parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { lbls := stringSliceFromArgs(args[1:]) - slices.SortFunc(vectorVals[0], func(a, b Sample) int { + slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { for _, label := range lbls { lv1 := a.Metric.Get(label) lv2 := b.Metric.Get(label) @@ -544,13 +539,13 @@ func funcSortByLabel(vectorVals []Vector, _ Matrix, args parser.Expressions, _ * return labels.Compare(a.Metric, b.Metric) }) - return vectorVals[0], nil + return vals[0].(Vector), nil } // === sort_by_label_desc(vector parser.ValueTypeVector, label parser.ValueTypeString...) 
(Vector, Annotations) === -func funcSortByLabelDesc(vectorVals []Vector, _ Matrix, args parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { lbls := stringSliceFromArgs(args[1:]) - slices.SortFunc(vectorVals[0], func(a, b Sample) int { + slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { for _, label := range lbls { lv1 := a.Metric.Get(label) lv2 := b.Metric.Get(label) @@ -570,7 +565,7 @@ func funcSortByLabelDesc(vectorVals []Vector, _ Matrix, args parser.Expressions, return -labels.Compare(a.Metric, b.Metric) }) - return vectorVals[0], nil + return vals[0].(Vector), nil } func clamp(vec Vector, minVal, maxVal float64, enh *EvalNodeHelper) (Vector, annotations.Annotations) { @@ -595,46 +590,46 @@ func clamp(vec Vector, minVal, maxVal float64, enh *EvalNodeHelper) (Vector, ann } // === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) === -func funcClamp(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec := vectorVals[0] - minVal := vectorVals[1][0].F - maxVal := vectorVals[2][0].F +func funcClamp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec := vals[0].(Vector) + minVal := vals[1].(Vector)[0].F + maxVal := vals[2].(Vector)[0].F return clamp(vec, minVal, maxVal, enh) } // === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) === -func funcClampMax(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec := vectorVals[0] - maxVal := vectorVals[1][0].F +func funcClampMax(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec := vals[0].(Vector) + maxVal := vals[1].(Vector)[0].F return clamp(vec, math.Inf(-1), maxVal, enh) } // === 
clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) === -func funcClampMin(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec := vectorVals[0] - minVal := vectorVals[1][0].F +func funcClampMin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec := vals[0].(Vector) + minVal := vals[1].(Vector)[0].F return clamp(vec, minVal, math.Inf(+1), enh) } // === round(Vector parser.ValueTypeVector, toNearest=1 Scalar) (Vector, Annotations) === -func funcRound(vectorVals []Vector, _ Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { // round returns a number rounded to toNearest. // Ties are solved by rounding up. toNearest := float64(1) if len(args) >= 2 { - toNearest = vectorVals[1][0].F + toNearest = vals[1].(Vector)[0].F } // Invert as it seems to cause fewer floating point accuracy issues. 
toNearestInverse := 1.0 / toNearest - return simpleFloatFunc(vectorVals, enh, func(f float64) float64 { + return simpleFloatFunc(vals, enh, func(f float64) float64 { return math.Floor(f*toNearestInverse+0.5) / toNearestInverse }), nil } // === Scalar(node parser.ValueTypeVector) Scalar === -func funcScalar(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcScalar(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { var ( - v = vectorVals[0] + v = vals[0].(Vector) value float64 found bool ) @@ -656,22 +651,22 @@ func funcScalar(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNo return append(enh.Out, Sample{F: value}), nil } -func aggrOverTime(matrixVal Matrix, enh *EvalNodeHelper, aggrFn func(Series) float64) Vector { - el := matrixVal[0] +func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) float64) Vector { + el := vals[0].(Matrix)[0] return append(enh.Out, Sample{F: aggrFn(el)}) } -func aggrHistOverTime(matrixVal Matrix, enh *EvalNodeHelper, aggrFn func(Series) (*histogram.FloatHistogram, error)) (Vector, error) { - el := matrixVal[0] +func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) (*histogram.FloatHistogram, error)) (Vector, error) { + el := vals[0].(Matrix)[0] res, err := aggrFn(el) return append(enh.Out, Sample{H: res}), err } // === avg_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcAvgOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - firstSeries := matrixVal[0] +func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + firstSeries := vals[0].(Matrix)[0] if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 { metricName := firstSeries.Metric.Get(labels.MetricName) return enh.Out, 
annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange())) @@ -700,7 +695,7 @@ func funcAvgOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh // the current implementation is accurate enough for practical purposes. if len(firstSeries.Floats) == 0 { // The passed values only contain histograms. - vec, err := aggrHistOverTime(matrixVal, enh, func(s Series) (*histogram.FloatHistogram, error) { + vec, err := aggrHistOverTime(vals, enh, func(s Series) (*histogram.FloatHistogram, error) { mean := s.Histograms[0].H.Copy() for i, h := range s.Histograms[1:] { count := float64(i + 2) @@ -727,7 +722,7 @@ func funcAvgOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh } return vec, nil } - return aggrOverTime(matrixVal, enh, func(s Series) float64 { + return aggrOverTime(vals, enh, func(s Series) float64 { var ( // Pre-set the 1st sample to start the loop with the 2nd. sum, count = s.Floats[0].F, 1. @@ -761,15 +756,15 @@ func funcAvgOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh } // === count_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) === -func funcCountOverTime(_ []Vector, matrixVals Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return aggrOverTime(matrixVals, enh, func(s Series) float64 { +func funcCountOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return aggrOverTime(vals, enh, func(s Series) float64 { return float64(len(s.Floats) + len(s.Histograms)) }), nil } // === last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) === -func funcLastOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - el := matrixVal[0] +func funcLastOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + el := vals[0].(Matrix)[0] var f FPoint if 
len(el.Floats) > 0 { @@ -794,8 +789,8 @@ func funcLastOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *E } // === mad_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcMadOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - samples := matrixVal[0] +func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + samples := vals[0].(Matrix)[0] var annos annotations.Annotations if len(samples.Floats) == 0 { return enh.Out, nil @@ -804,7 +799,7 @@ func funcMadOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh metricName := samples.Metric.Get(labels.MetricName) annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) } - return aggrOverTime(matrixVal, enh, func(s Series) float64 { + return aggrOverTime(vals, enh, func(s Series) float64 { values := make(vectorByValueHeap, 0, len(s.Floats)) for _, f := range s.Floats { values = append(values, Sample{F: f.F}) @@ -819,8 +814,8 @@ func funcMadOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh } // === ts_of_last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) === -func funcTsOfLastOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - el := matrixVal[0] +func funcTsOfLastOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + el := vals[0].(Matrix)[0] var tf int64 if len(el.Floats) > 0 { @@ -839,22 +834,22 @@ func funcTsOfLastOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, en } // === ts_of_max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcTsOfMaxOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return compareOverTime(matrixVal, args, enh, 
func(cur, maxVal float64) bool { +func funcTsOfMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return compareOverTime(vals, args, enh, func(cur, maxVal float64) bool { return (cur >= maxVal) || math.IsNaN(maxVal) }, true) } // === ts_of_min_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcTsOfMinOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return compareOverTime(matrixVals, args, enh, func(cur, maxVal float64) bool { +func funcTsOfMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return compareOverTime(vals, args, enh, func(cur, maxVal float64) bool { return (cur <= maxVal) || math.IsNaN(maxVal) }, true) } // compareOverTime is a helper used by funcMaxOverTime and funcMinOverTime. -func compareOverTime(matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper, compareFn func(float64, float64) bool, returnTimestamp bool) (Vector, annotations.Annotations) { - samples := matrixVal[0] +func compareOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, compareFn func(float64, float64) bool, returnTimestamp bool) (Vector, annotations.Annotations) { + samples := vals[0].(Matrix)[0] var annos annotations.Annotations if len(samples.Floats) == 0 { return enh.Out, nil @@ -863,7 +858,7 @@ func compareOverTime(matrixVal Matrix, args parser.Expressions, enh *EvalNodeHel metricName := samples.Metric.Get(labels.MetricName) annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) } - return aggrOverTime(matrixVal, enh, func(s Series) float64 { + return aggrOverTime(vals, enh, func(s Series) float64 { maxVal := s.Floats[0].F tsOfMax := s.Floats[0].T for _, f := range s.Floats { @@ -880,29 +875,29 @@ func compareOverTime(matrixVal Matrix, args parser.Expressions, enh *EvalNodeHel } 
// === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcMaxOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return compareOverTime(matrixVals, args, enh, func(cur, maxVal float64) bool { +func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return compareOverTime(vals, args, enh, func(cur, maxVal float64) bool { return (cur > maxVal) || math.IsNaN(maxVal) }, false) } // === min_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcMinOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return compareOverTime(matrixVals, args, enh, func(cur, maxVal float64) bool { +func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return compareOverTime(vals, args, enh, func(cur, maxVal float64) bool { return (cur < maxVal) || math.IsNaN(maxVal) }, false) } // === sum_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcSumOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - firstSeries := matrixVal[0] +func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + firstSeries := vals[0].(Matrix)[0] if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 { metricName := firstSeries.Metric.Get(labels.MetricName) return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange())) } if len(firstSeries.Floats) == 0 { // The passed values only contain histograms. 
- vec, err := aggrHistOverTime(matrixVal, enh, func(s Series) (*histogram.FloatHistogram, error) { + vec, err := aggrHistOverTime(vals, enh, func(s Series) (*histogram.FloatHistogram, error) { sum := s.Histograms[0].H.Copy() for _, h := range s.Histograms[1:] { _, err := sum.Add(h.H) @@ -922,7 +917,7 @@ func funcSumOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh } return vec, nil } - return aggrOverTime(matrixVal, enh, func(s Series) float64 { + return aggrOverTime(vals, enh, func(s Series) float64 { var sum, c float64 for _, f := range s.Floats { sum, c = kahanSumInc(f.F, sum, c) @@ -935,9 +930,9 @@ func funcSumOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh } // === quantile_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcQuantileOverTime(vectorVals []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - q := vectorVals[0][0].F - el := matrixVal[0] +func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + q := vals[0].(Vector)[0].F + el := vals[1].(Matrix)[0] if len(el.Floats) == 0 { return enh.Out, nil } @@ -957,8 +952,8 @@ func funcQuantileOverTime(vectorVals []Vector, matrixVal Matrix, args parser.Exp return append(enh.Out, Sample{F: quantile(q, values)}), annos } -func varianceOverTime(matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) { - samples := matrixVal[0] +func varianceOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) { + samples := vals[0].(Matrix)[0] var annos annotations.Annotations if len(samples.Floats) == 0 { return enh.Out, nil @@ -967,7 +962,7 @@ func varianceOverTime(matrixVal Matrix, args parser.Expressions, enh *EvalNodeHe metricName := 
samples.Metric.Get(labels.MetricName) annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) } - return aggrOverTime(matrixVal, enh, func(s Series) float64 { + return aggrOverTime(vals, enh, func(s Series) float64 { var count float64 var mean, cMean float64 var aux, cAux float64 @@ -986,18 +981,18 @@ func varianceOverTime(matrixVal Matrix, args parser.Expressions, enh *EvalNodeHe } // === stddev_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcStddevOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return varianceOverTime(matrixVals, args, enh, math.Sqrt) +func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return varianceOverTime(vals, args, enh, math.Sqrt) } // === stdvar_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcStdvarOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return varianceOverTime(matrixVals, args, enh, nil) +func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return varianceOverTime(vals, args, enh, nil) } // === absent(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAbsent(vectorVals []Vector, _ Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - if len(vectorVals[0]) > 0 { +func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + if len(vals[0].(Vector)) > 0 { return enh.Out, nil } return append(enh.Out, @@ -1012,19 +1007,19 @@ func funcAbsent(vectorVals []Vector, _ Matrix, args parser.Expressions, enh *Eva // This function will return 1 if the matrix has at least one element. 
// Due to engine optimization, this function is only called when this condition is true. // Then, the engine post-processes the results to get the expected output. -func funcAbsentOverTime(_ []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcAbsentOverTime(_ []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{F: 1}), nil } // === present_over_time(Vector parser.ValueTypeMatrix) (Vector, Annotations) === -func funcPresentOverTime(_ []Vector, matrixVals Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return aggrOverTime(matrixVals, enh, func(_ Series) float64 { +func funcPresentOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return aggrOverTime(vals, enh, func(_ Series) float64 { return 1 }), nil } -func simpleFloatFunc(vectorVals []Vector, enh *EvalNodeHelper, f func(float64) float64) Vector { - for _, el := range vectorVals[0] { +func simpleFloatFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector { + for _, el := range vals[0].(Vector) { if el.H == nil { // Process only float samples. 
if !enh.enableDelayedNameRemoval { el.Metric = el.Metric.DropReserved(schema.IsMetadataLabel) @@ -1040,127 +1035,127 @@ func simpleFloatFunc(vectorVals []Vector, enh *EvalNodeHelper, f func(float64) f } // === abs(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAbs(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vectorVals, enh, math.Abs), nil +func funcAbs(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vals, enh, math.Abs), nil } // === ceil(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcCeil(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vectorVals, enh, math.Ceil), nil +func funcCeil(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vals, enh, math.Ceil), nil } // === floor(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcFloor(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vectorVals, enh, math.Floor), nil +func funcFloor(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vals, enh, math.Floor), nil } // === exp(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcExp(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vectorVals, enh, math.Exp), nil +func funcExp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vals, enh, math.Exp), nil } // === sqrt(Vector VectorNode) (Vector, Annotations) === -func funcSqrt(vectorVals []Vector, _ Matrix, _ 
parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vectorVals, enh, math.Sqrt), nil +func funcSqrt(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vals, enh, math.Sqrt), nil } // === ln(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcLn(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vectorVals, enh, math.Log), nil +func funcLn(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vals, enh, math.Log), nil } // === log2(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcLog2(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vectorVals, enh, math.Log2), nil +func funcLog2(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vals, enh, math.Log2), nil } // === log10(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcLog10(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vectorVals, enh, math.Log10), nil +func funcLog10(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vals, enh, math.Log10), nil } // === sin(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcSin(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vectorVals, enh, math.Sin), nil +func funcSin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vals, enh, math.Sin), nil } // === cos(Vector 
parser.ValueTypeVector) (Vector, Annotations) === -func funcCos(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vectorVals, enh, math.Cos), nil +func funcCos(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vals, enh, math.Cos), nil } // === tan(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcTan(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vectorVals, enh, math.Tan), nil +func funcTan(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vals, enh, math.Tan), nil } // === asin(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAsin(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vectorVals, enh, math.Asin), nil +func funcAsin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vals, enh, math.Asin), nil } // === acos(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAcos(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vectorVals, enh, math.Acos), nil +func funcAcos(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vals, enh, math.Acos), nil } // === atan(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAtan(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vectorVals, enh, math.Atan), nil +func funcAtan(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, 
annotations.Annotations) { + return simpleFloatFunc(vals, enh, math.Atan), nil } // === sinh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcSinh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vectorVals, enh, math.Sinh), nil +func funcSinh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vals, enh, math.Sinh), nil } // === cosh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcCosh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vectorVals, enh, math.Cosh), nil +func funcCosh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vals, enh, math.Cosh), nil } // === tanh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcTanh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vectorVals, enh, math.Tanh), nil +func funcTanh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vals, enh, math.Tanh), nil } // === asinh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAsinh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vectorVals, enh, math.Asinh), nil +func funcAsinh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vals, enh, math.Asinh), nil } // === acosh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAcosh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return 
simpleFloatFunc(vectorVals, enh, math.Acosh), nil +func funcAcosh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vals, enh, math.Acosh), nil } // === atanh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAtanh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vectorVals, enh, math.Atanh), nil +func funcAtanh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vals, enh, math.Atanh), nil } // === rad(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcRad(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vectorVals, enh, func(v float64) float64 { +func funcRad(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vals, enh, func(v float64) float64 { return v * math.Pi / 180 }), nil } // === deg(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcDeg(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vectorVals, enh, func(v float64) float64 { +func funcDeg(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vals, enh, func(v float64) float64 { return v * 180 / math.Pi }), nil } // === pi() Scalar === -func funcPi(_ []Vector, _ Matrix, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcPi(_ []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { return Vector{Sample{F: math.Pi}}, nil } // === sgn(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcSgn(vectorVals []Vector, _ Matrix, _ 
parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vectorVals, enh, func(v float64) float64 { +func funcSgn(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vals, enh, func(v float64) float64 { switch { case v < 0: return -1 @@ -1173,8 +1168,8 @@ func funcSgn(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeH } // === timestamp(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcTimestamp(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec := vectorVals[0] +func funcTimestamp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec := vals[0].(Vector) for _, el := range vec { if !enh.enableDelayedNameRemoval { el.Metric = el.Metric.DropReserved(schema.IsMetadataLabel) @@ -1250,8 +1245,8 @@ func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept f } // === deriv(node parser.ValueTypeMatrix) (Vector, Annotations) === -func funcDeriv(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - samples := matrixVal[0] +func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + samples := vals[0].(Matrix)[0] metricName := samples.Metric.Get(labels.MetricName) // No sense in trying to compute a derivative without at least two float points. 
@@ -1275,9 +1270,9 @@ func funcDeriv(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalN } // === predict_linear(node parser.ValueTypeMatrix, k parser.ValueTypeScalar) (Vector, Annotations) === -func funcPredictLinear(vectorVals []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - samples := matrixVal[0] - duration := vectorVals[0][0].F +func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + samples := vals[0].(Matrix)[0] + duration := vals[1].(Vector)[0].F metricName := samples.Metric.Get(labels.MetricName) // No sense in trying to predict anything without at least two float points. @@ -1297,8 +1292,8 @@ func funcPredictLinear(vectorVals []Vector, matrixVal Matrix, args parser.Expres return append(enh.Out, Sample{F: slope*duration + intercept}), nil } -func simpleHistogramFunc(vectorVals []Vector, enh *EvalNodeHelper, f func(h *histogram.FloatHistogram) float64) Vector { - for _, el := range vectorVals[0] { +func simpleHistogramFunc(vals []parser.Value, enh *EvalNodeHelper, f func(h *histogram.FloatHistogram) float64) Vector { + for _, el := range vals[0].(Vector) { if el.H != nil { // Process only histogram samples. 
if !enh.enableDelayedNameRemoval { el.Metric = el.Metric.DropMetricName() @@ -1314,28 +1309,28 @@ func simpleHistogramFunc(vectorVals []Vector, enh *EvalNodeHelper, f func(h *his } // === histogram_count(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramCount(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleHistogramFunc(vectorVals, enh, func(h *histogram.FloatHistogram) float64 { +func funcHistogramCount(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 { return h.Count }), nil } // === histogram_sum(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramSum(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleHistogramFunc(vectorVals, enh, func(h *histogram.FloatHistogram) float64 { +func funcHistogramSum(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 { return h.Sum }), nil } // === histogram_avg(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramAvg(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleHistogramFunc(vectorVals, enh, func(h *histogram.FloatHistogram) float64 { +func funcHistogramAvg(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 { return h.Sum / h.Count }), nil } -func histogramVariance(vectorVals []Vector, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) { - return simpleHistogramFunc(vectorVals, enh, func(h 
*histogram.FloatHistogram) float64 { +func histogramVariance(vals []parser.Value, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) { + return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 { mean := h.Sum / h.Count var variance, cVariance float64 it := h.AllBucketIterator() @@ -1372,20 +1367,20 @@ func histogramVariance(vectorVals []Vector, enh *EvalNodeHelper, varianceToResul } // === histogram_stddev(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramStdDev(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return histogramVariance(vectorVals, enh, math.Sqrt) +func funcHistogramStdDev(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return histogramVariance(vals, enh, math.Sqrt) } // === histogram_stdvar(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramStdVar(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return histogramVariance(vectorVals, enh, nil) +func funcHistogramStdVar(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return histogramVariance(vals, enh, nil) } // === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramFraction(vectorVals []Vector, _ Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - lower := vectorVals[0][0].F - upper := vectorVals[1][0].F - inVec := vectorVals[2] +func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + lower := vals[0].(Vector)[0].F + upper := vals[1].(Vector)[0].F + inVec := vals[2].(Vector) annos := enh.resetHistograms(inVec, args[2]) @@ -1427,9 +1422,9 @@ func 
funcHistogramFraction(vectorVals []Vector, _ Matrix, args parser.Expression } // === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramQuantile(vectorVals []Vector, _ Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - q := vectorVals[0][0].F - inVec := vectorVals[1] +func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + q := vals[0].(Vector)[0].F + inVec := vals[1].(Vector) var annos annotations.Annotations if math.IsNaN(q) || q < 0 || q > 1 { @@ -1479,9 +1474,9 @@ func funcHistogramQuantile(vectorVals []Vector, _ Matrix, args parser.Expression } // === resets(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcResets(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - floats := matrixVal[0].Floats - histograms := matrixVal[0].Histograms +func funcResets(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + floats := vals[0].(Matrix)[0].Floats + histograms := vals[0].(Matrix)[0].Histograms resets := 0 if len(floats) == 0 && len(histograms) == 0 { return enh.Out, nil @@ -1524,9 +1519,9 @@ func funcResets(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNod } // === changes(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcChanges(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - floats := matrixVal[0].Floats - histograms := matrixVal[0].Histograms +func funcChanges(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + floats := vals[0].(Matrix)[0].Floats + histograms := vals[0].(Matrix)[0].Histograms changes := 0 if len(floats) == 0 && len(histograms) == 0 { return enh.Out, nil @@ -1581,7 +1576,7 @@ func (ev *evaluator) 
evalLabelReplace(ctx context.Context, args parser.Expressio if err != nil { panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr)) } - if !model.LabelName(dst).IsValid() { + if !labels.IsValidLabelName(dst, ev.validationScheme) { panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst)) } @@ -1612,11 +1607,11 @@ func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressio } // === Vector(s Scalar) (Vector, Annotations) === -func funcVector(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcVector(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{ Metric: labels.Labels{}, - F: vectorVals[0][0].F, + F: vals[0].(Vector)[0].F, }), nil } @@ -1629,12 +1624,12 @@ func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions) ) for i := 3; i < len(args); i++ { src := stringFromArg(args[i]) - if !model.LabelName(src).IsValid() { + if !labels.IsValidLabelName(src, ev.validationScheme) { panic(fmt.Errorf("invalid source label name in label_join(): %s", src)) } srcLabels[i-3] = src } - if !model.LabelName(dst).IsValid() { + if !labels.IsValidLabelName(dst, ev.validationScheme) { panic(fmt.Errorf("invalid destination label name in label_join(): %s", dst)) } @@ -1666,8 +1661,8 @@ func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions) } // Common code for date related functions. 
-func dateWrapper(vectorVals []Vector, enh *EvalNodeHelper, f func(time.Time) float64) Vector { - if len(vectorVals) == 0 { +func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) float64) Vector { + if len(vals) == 0 { return append(enh.Out, Sample{ Metric: labels.Labels{}, @@ -1675,7 +1670,7 @@ func dateWrapper(vectorVals []Vector, enh *EvalNodeHelper, f func(time.Time) flo }) } - for _, el := range vectorVals[0] { + for _, el := range vals[0].(Vector) { if el.H != nil { // Ignore histogram sample. continue @@ -1694,57 +1689,57 @@ func dateWrapper(vectorVals []Vector, enh *EvalNodeHelper, f func(time.Time) flo } // === days_in_month(v Vector) Scalar === -func funcDaysInMonth(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return dateWrapper(vectorVals, enh, func(t time.Time) float64 { +func funcDaysInMonth(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(32 - time.Date(t.Year(), t.Month(), 32, 0, 0, 0, 0, time.UTC).Day()) }), nil } // === day_of_month(v Vector) Scalar === -func funcDayOfMonth(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return dateWrapper(vectorVals, enh, func(t time.Time) float64 { +func funcDayOfMonth(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Day()) }), nil } // === day_of_week(v Vector) Scalar === -func funcDayOfWeek(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return dateWrapper(vectorVals, enh, func(t time.Time) float64 { +func funcDayOfWeek(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vals, 
enh, func(t time.Time) float64 { return float64(t.Weekday()) }), nil } // === day_of_year(v Vector) Scalar === -func funcDayOfYear(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return dateWrapper(vectorVals, enh, func(t time.Time) float64 { +func funcDayOfYear(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.YearDay()) }), nil } // === hour(v Vector) Scalar === -func funcHour(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return dateWrapper(vectorVals, enh, func(t time.Time) float64 { +func funcHour(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Hour()) }), nil } // === minute(v Vector) Scalar === -func funcMinute(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return dateWrapper(vectorVals, enh, func(t time.Time) float64 { +func funcMinute(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Minute()) }), nil } // === month(v Vector) Scalar === -func funcMonth(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return dateWrapper(vectorVals, enh, func(t time.Time) float64 { +func funcMonth(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Month()) }), nil } // === year(v Vector) Scalar === -func funcYear(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - 
return dateWrapper(vectorVals, enh, func(t time.Time) float64 { +func funcYear(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Year()) }), nil } diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y index e7e16cd0330..3747e01f53c 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y @@ -23,8 +23,6 @@ import ( "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/promql/parser/posrange" - - "github.com/prometheus/common/model" ) %} @@ -378,14 +376,14 @@ grouping_label_list: grouping_label : maybe_label { - if !model.LabelName($1.Val).IsValid() { + if !labels.IsValidLabelName($1.Val, yylex.(*parser).validationScheme) { yylex.(*parser).addParseErrf($1.PositionRange(),"invalid label name for grouping: %q", $1.Val) } $$ = $1 } | STRING { unquoted := yylex.(*parser).unquoteString($1.Val) - if !model.LabelName(unquoted).IsValid() { + if !labels.IsValidLabelName(unquoted, yylex.(*parser).validationScheme) { yylex.(*parser).addParseErrf($1.PositionRange(),"invalid label name for grouping: %q", unquoted) } $$ = $1 diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go index e93d1b3de6b..17fd159e5a6 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go @@ -12,8 +12,6 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/promql/parser/posrange" - - 
"github.com/prometheus/common/model" ) type yySymType struct { @@ -1327,7 +1325,7 @@ yydefault: case 59: yyDollar = yyS[yypt-1 : yypt+1] { - if !model.LabelName(yyDollar[1].item.Val).IsValid() { + if !labels.IsValidLabelName(yyDollar[1].item.Val, yylex.(*parser).validationScheme) { yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid label name for grouping: %q", yyDollar[1].item.Val) } yyVAL.item = yyDollar[1].item @@ -1336,7 +1334,7 @@ yydefault: yyDollar = yyS[yypt-1 : yypt+1] { unquoted := yylex.(*parser).unquoteString(yyDollar[1].item.Val) - if !model.LabelName(unquoted).IsValid() { + if !labels.IsValidLabelName(unquoted, yylex.(*parser).validationScheme) { yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid label name for grouping: %q", unquoted) } yyVAL.item = yyDollar[1].item diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go index 03c6a8446a0..fa8cc2ec852 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go @@ -71,6 +71,8 @@ type parser struct { generatedParserResult interface{} parseErrors ParseErrors + + validationScheme model.ValidationScheme } type Opt func(p *parser) @@ -81,6 +83,14 @@ func WithFunctions(functions map[string]*Function) Opt { } } +// WithValidationScheme controls how metric/label names are validated. +// Defaults to UTF8Validation. +func WithValidationScheme(scheme model.ValidationScheme) Opt { + return func(p *parser) { + p.validationScheme = scheme + } +} + // NewParser returns a new parser. 
func NewParser(input string, opts ...Opt) *parser { //nolint:revive // unexported-return p := parserPool.Get().(*parser) @@ -90,6 +100,7 @@ func NewParser(input string, opts ...Opt) *parser { //nolint:revive // unexporte p.parseErrors = nil p.generatedParserResult = nil p.closingParens = make([]posrange.Pos, 0) + p.validationScheme = model.UTF8Validation // Clear lexer struct before reusing. p.lex = Lexer{ diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/aggregators.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/aggregators.test index 576b36868f1..2fee20e630c 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/aggregators.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/aggregators.test @@ -232,38 +232,30 @@ load 5m http_requests_histogram{job="api-server", instance="3", group="canary"} {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}} eval instant at 0m max(http_requests) - expect no_info {} 4 # The histogram is ignored here so the result doesn't change but it has an info annotation now. -eval instant at 0m max({job="api-server"}) - expect info +eval_info instant at 0m max({job="api-server"}) {} 4 # The histogram is ignored here so there is no result but it has an info annotation now. -eval instant at 0m max(http_requests_histogram) - expect info +eval_info instant at 0m max(http_requests_histogram) eval instant at 0m min(http_requests) - expect no_info {} 1 # The histogram is ignored here so the result doesn't change but it has an info annotation now. -eval instant at 0m min({job="api-server"}) - expect info +eval_info instant at 0m min({job="api-server"}) {} 1 # The histogram is ignored here so there is no result but it has an info annotation now. 
-eval instant at 0m min(http_requests_histogram) - expect info +eval_info instant at 0m min(http_requests_histogram) eval instant at 0m max by (group) (http_requests) - expect no_info {group="production"} 2 {group="canary"} 4 eval instant at 0m min by (group) (http_requests) - expect no_info {group="production"} 1 {group="canary"} 3 @@ -284,31 +276,26 @@ load 5m http_requests_histogram{job="api-server", instance="3", group="production"} {{schema:0 sum:20 count:20}}x11 foo 1+1x9 3 -eval instant at 50m topk(3, http_requests) - expect ordered +eval_ordered instant at 50m topk(3, http_requests) http_requests{group="canary", instance="1", job="app-server"} 800 http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="production", instance="1", job="app-server"} 600 -eval instant at 50m topk((3), (http_requests)) - expect ordered +eval_ordered instant at 50m topk((3), (http_requests)) http_requests{group="canary", instance="1", job="app-server"} 800 http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="production", instance="1", job="app-server"} 600 -eval instant at 50m topk(5, http_requests{group="canary",job="app-server"}) - expect ordered +eval_ordered instant at 50m topk(5, http_requests{group="canary",job="app-server"}) http_requests{group="canary", instance="1", job="app-server"} 800 http_requests{group="canary", instance="0", job="app-server"} 700 -eval instant at 50m bottomk(3, http_requests) - expect ordered +eval_ordered instant at 50m bottomk(3, http_requests) http_requests{group="production", instance="0", job="api-server"} 100 http_requests{group="production", instance="1", job="api-server"} 200 http_requests{group="canary", instance="0", job="api-server"} 300 -eval instant at 50m bottomk(5, http_requests{group="canary",job="app-server"}) - expect ordered +eval_ordered instant at 50m bottomk(5, http_requests{group="canary",job="app-server"}) http_requests{group="canary", instance="0", 
job="app-server"} 700 http_requests{group="canary", instance="1", job="app-server"} 800 @@ -322,39 +309,33 @@ eval instant at 50m bottomk by (group) (2, http_requests) http_requests{group="production", instance="0", job="api-server"} 100 http_requests{group="production", instance="1", job="api-server"} 200 -eval instant at 50m bottomk by (group) (2, http_requests{group="production"}) - expect ordered +eval_ordered instant at 50m bottomk by (group) (2, http_requests{group="production"}) http_requests{group="production", instance="0", job="api-server"} 100 http_requests{group="production", instance="1", job="api-server"} 200 # Test NaN is sorted away from the top/bottom. -eval instant at 50m topk(3, http_requests{job="api-server",group="production"}) - expect ordered +eval_ordered instant at 50m topk(3, http_requests{job="api-server",group="production"}) http_requests{job="api-server", instance="1", group="production"} 200 http_requests{job="api-server", instance="0", group="production"} 100 http_requests{job="api-server", instance="2", group="production"} NaN -eval instant at 50m bottomk(3, http_requests{job="api-server",group="production"}) - expect ordered +eval_ordered instant at 50m bottomk(3, http_requests{job="api-server",group="production"}) http_requests{job="api-server", instance="0", group="production"} 100 http_requests{job="api-server", instance="1", group="production"} 200 http_requests{job="api-server", instance="2", group="production"} NaN # Test topk and bottomk allocate min(k, input_vector) for results vector -eval instant at 50m bottomk(9999999999, http_requests{job="app-server",group="canary"}) - expect ordered +eval_ordered instant at 50m bottomk(9999999999, http_requests{job="app-server",group="canary"}) http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="canary", instance="1", job="app-server"} 800 -eval instant at 50m topk(9999999999, http_requests{job="api-server",group="production"}) - expect ordered 
+eval_ordered instant at 50m topk(9999999999, http_requests{job="api-server",group="production"}) http_requests{job="api-server", instance="1", group="production"} 200 http_requests{job="api-server", instance="0", group="production"} 100 http_requests{job="api-server", instance="2", group="production"} NaN # Bug #5276. -eval instant at 50m topk(scalar(foo), http_requests) - expect ordered +eval_ordered instant at 50m topk(scalar(foo), http_requests) http_requests{group="canary", instance="1", job="app-server"} 800 http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="production", instance="1", job="app-server"} 600 @@ -367,54 +348,42 @@ eval range from 0m to 50m step 5m count(bottomk(scalar(foo), http_requests)) {} 1 2 3 4 5 6 7 8 9 9 3 # Tests for histogram: should ignore histograms. -eval instant at 50m topk(100, http_requests_histogram) - expect info +eval_info instant at 50m topk(100, http_requests_histogram) #empty -eval range from 0 to 50m step 5m topk(100, http_requests_histogram) - expect info +eval_info range from 0 to 50m step 5m topk(100, http_requests_histogram) #empty -eval instant at 50m topk(1, {__name__=~"http_requests(_histogram)?"}) - expect info +eval_info instant at 50m topk(1, {__name__=~"http_requests(_histogram)?"}) {__name__="http_requests", group="canary", instance="1", job="app-server"} 800 -eval instant at 50m count(topk(1000, {__name__=~"http_requests(_histogram)?"})) - expect info +eval_info instant at 50m count(topk(1000, {__name__=~"http_requests(_histogram)?"})) {} 9 -eval range from 0 to 50m step 5m count(topk(1000, {__name__=~"http_requests(_histogram)?"})) - expect info +eval_info range from 0 to 50m step 5m count(topk(1000, {__name__=~"http_requests(_histogram)?"})) {} 9x10 -eval instant at 50m topk by (instance) (1, {__name__=~"http_requests(_histogram)?"}) - expect info +eval_info instant at 50m topk by (instance) (1, {__name__=~"http_requests(_histogram)?"}) {__name__="http_requests", 
group="canary", instance="0", job="app-server"} 700 {__name__="http_requests", group="canary", instance="1", job="app-server"} 800 {__name__="http_requests", group="production", instance="2", job="api-server"} NaN -eval instant at 50m bottomk(100, http_requests_histogram) - expect info +eval_info instant at 50m bottomk(100, http_requests_histogram) #empty -eval range from 0 to 50m step 5m bottomk(100, http_requests_histogram) - expect info +eval_info range from 0 to 50m step 5m bottomk(100, http_requests_histogram) #empty -eval instant at 50m bottomk(1, {__name__=~"http_requests(_histogram)?"}) - expect info +eval_info instant at 50m bottomk(1, {__name__=~"http_requests(_histogram)?"}) {__name__="http_requests", group="production", instance="0", job="api-server"} 100 -eval instant at 50m count(bottomk(1000, {__name__=~"http_requests(_histogram)?"})) - expect info +eval_info instant at 50m count(bottomk(1000, {__name__=~"http_requests(_histogram)?"})) {} 9 -eval range from 0 to 50m step 5m count(bottomk(1000, {__name__=~"http_requests(_histogram)?"})) - expect info +eval_info range from 0 to 50m step 5m count(bottomk(1000, {__name__=~"http_requests(_histogram)?"})) {} 9x10 -eval instant at 50m bottomk by (instance) (1, {__name__=~"http_requests(_histogram)?"}) - expect info +eval_info instant at 50m bottomk by (instance) (1, {__name__=~"http_requests(_histogram)?"}) {__name__="http_requests", group="production", instance="0", job="api-server"} 100 {__name__="http_requests", group="production", instance="1", job="api-server"} 200 {__name__="http_requests", group="production", instance="2", job="api-server"} NaN @@ -478,8 +447,8 @@ eval instant at 1m count_values by (job, group)("job", version) {job="{count:20, sum:10, [-2,-1):2, [-1,-0.5):1, [-0.001,0.001]:2, (0.5,1]:1, (1,2]:2}", group="canary"} 2 # Test an invalid label value. 
-eval instant at 0 count_values("a\xc5z", version) - expect fail msg:invalid label name "a\xc5z" +eval_fail instant at 0 count_values("a\xc5z", version) + expected_fail_message invalid label name "a\xc5z" # Tests for quantile. clear @@ -493,67 +462,46 @@ load 10s data{test="uneven samples",point="a"} 0 data{test="uneven samples",point="b"} 1 data{test="uneven samples",point="c"} 4 - data{test="NaN sample",point="a"} 0 - data{test="NaN sample",point="b"} 1 - data{test="NaN sample",point="c"} NaN data_histogram{test="histogram sample", point="c"} {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}} foo 0 1 0 1 0 1 0.8 -# 80th percentile. -# The NaN sample is treated as the smallest possible value. eval instant at 1m quantile without(point)(0.8, data) - expect no_info {test="two samples"} 0.8 {test="three samples"} 1.6 {test="uneven samples"} 2.8 - {test="NaN sample"} 0.6 - -# 20th percentile. -# A quantile between NaN and 0 is interpolated as NaN. -eval instant at 1m quantile without(point)(0.2, data) - {test="two samples"} 0.2 - {test="three samples"} 0.4 - {test="uneven samples"} 0.4 - {test="NaN sample"} NaN # The histogram is ignored here so the result doesn't change but it has an info annotation now. -eval instant at 1m quantile without(point)(0.8, {__name__=~"data(_histogram)?"}) - expect info +eval_info instant at 1m quantile without(point)(0.8, {__name__=~"data(_histogram)?"}) {test="two samples"} 0.8 {test="three samples"} 1.6 {test="uneven samples"} 2.8 - {test="NaN sample"} 0.6 # The histogram is ignored here so there is no result but it has an info annotation now. -eval instant at 1m quantile(0.8, data_histogram) - expect info +eval_info instant at 1m quantile(0.8, data_histogram) # Bug #5276. 
eval instant at 1m quantile without(point)(scalar(foo), data) {test="two samples"} 0.8 {test="three samples"} 1.6 {test="uneven samples"} 2.8 - {test="NaN sample"} 0.6 + eval instant at 1m quantile without(point)((scalar(foo)), data) {test="two samples"} 0.8 {test="three samples"} 1.6 {test="uneven samples"} 2.8 - {test="NaN sample"} 0.6 eval instant at 1m quantile without(point)(NaN, data) expect warn msg: PromQL warning: quantile value should be between 0 and 1, got NaN - {test="two samples"} NaN - {test="three samples"} NaN - {test="uneven samples"} NaN - {test="NaN sample"} NaN + {test="two samples"} NaN + {test="three samples"} NaN + {test="uneven samples"} NaN # Bug #15971. eval range from 0m to 1m step 10s quantile without(point) (scalar(foo), data) {test="two samples"} 0 1 0 1 0 1 0.8 {test="three samples"} 0 2 0 2 0 2 1.6 {test="uneven samples"} 0 4 0 4 0 4 2.8 - {test="NaN sample"} NaN 1 NaN 1 NaN 1 0.6 # Tests for group. clear @@ -797,28 +745,22 @@ load 5m series{label="c"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}} # The histogram is ignored here so the result doesn't change but it has an info annotation now. -eval instant at 0m stddev(series) - expect info +eval_info instant at 0m stddev(series) {} 0.5 -eval instant at 0m stdvar(series) - expect info +eval_info instant at 0m stdvar(series) {} 0.25 # The histogram is ignored here so there is no result but it has an info annotation now. 
-eval instant at 0m stddev({label="c"}) - expect info +eval_info instant at 0m stddev({label="c"}) -eval instant at 0m stdvar({label="c"}) - expect info +eval_info instant at 0m stdvar({label="c"}) -eval instant at 0m stddev by (label) (series) - expect info +eval_info instant at 0m stddev by (label) (series) {label="a"} 0 {label="b"} 0 -eval instant at 0m stdvar by (label) (series) - expect info +eval_info instant at 0m stdvar by (label) (series) {label="a"} 0 {label="b"} 0 @@ -829,21 +771,17 @@ load 5m series{label="b"} 1 series{label="c"} 2 -eval instant at 0m stddev(series) - expect info +eval_info instant at 0m stddev(series) {} 0.5 -eval instant at 0m stdvar(series) - expect info +eval_info instant at 0m stdvar(series) {} 0.25 -eval instant at 0m stddev by (label) (series) - expect info +eval_info instant at 0m stddev by (label) (series) {label="b"} 0 {label="c"} 0 -eval instant at 0m stdvar by (label) (series) - expect info +eval_info instant at 0m stdvar by (label) (series) {label="b"} 0 {label="c"} 0 diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/at_modifier.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/at_modifier.test index 4091f7eabf2..1ad301bdb7d 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/at_modifier.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/at_modifier.test @@ -90,7 +90,8 @@ eval instant at 25s sum_over_time(metric{job="1"}[100] offset 50s @ 100) eval instant at 25s metric{job="1"} @ 50 + metric{job="1"} @ 100 {job="1"} 15 -eval instant at 25s rate(metric{job="1"}[100s] @ 100) + label_replace(rate(metric{job="2"}[123s] @ 200), "job", "1", "", "") +# Note that this triggers an info annotation because we are rate'ing a metric that does not end in `_total`. 
+eval_info instant at 25s rate(metric{job="1"}[100s] @ 100) + label_replace(rate(metric{job="2"}[123s] @ 200), "job", "1", "", "") {job="1"} 0.3 eval instant at 25s sum_over_time(metric{job="1"}[100s] @ 100) + label_replace(sum_over_time(metric{job="2"}[100s] @ 100), "job", "1", "", "") diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/collision.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/collision.test index 8addf02be9b..4dcdfa4ddf7 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/collision.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/collision.test @@ -17,7 +17,6 @@ load 5m testmetric1{src="a",dst="b"} 0 testmetric2{src="a",dst="b"} 1 -eval instant at 0m ceil({__name__=~'testmetric1|testmetric2'}) - expect fail +eval_fail instant at 0m ceil({__name__=~'testmetric1|testmetric2'}) clear diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test index b1eda909f83..037e40923ae 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test @@ -228,48 +228,37 @@ load 5m http_requests_histogram{path="/g"} 0 0 {{schema:-53 sum:3 count:3 custom_values:[1] buckets:[3]}} {{schema:-53 sum:3 count:3 custom_values:[5 10] buckets:[3]}} eval instant at 50m irate(http_requests_total[50m]) - expect no_warn {path="/foo"} .03333333333333333333 {path="/bar"} .03333333333333333333 # Counter reset. 
eval instant at 30m irate(http_requests_total[50m]) - expect no_warn {path="/foo"} .03333333333333333333 {path="/bar"} 0 eval range from 0 to 20m step 5m irate(http_requests_nan[15m1s]) - expect no_warn {} _ NaN NaN NaN 0.02 eval instant at 20m irate(http_requests_histogram{path="/a"}[20m]) - expect no_warn {path="/a"} {{sum:0.01 count:0.01 counter_reset_hint:gauge}} eval instant at 20m irate(http_requests_histogram{path="/b"}[20m]) - expect no_warn {path="/b"} {{sum:0.01 count:0.01 counter_reset_hint:gauge}} eval instant at 20m irate(http_requests_histogram{path="/b"}[6m]) - expect no_warn -eval instant at 20m irate(http_requests_histogram{path="/c"}[20m]) - expect warn +eval_warn instant at 20m irate(http_requests_histogram{path="/c"}[20m]) {path="/c"} {{sum:0.01 count:0.01 counter_reset_hint:gauge}} -eval instant at 20m irate(http_requests_histogram{path="/d"}[20m]) - expect warn +eval_warn instant at 20m irate(http_requests_histogram{path="/d"}[20m]) {path="/d"} {{sum:0.01 count:0.01 counter_reset_hint:gauge}} -eval instant at 20m irate(http_requests_histogram{path="/e"}[20m]) - expect warn +eval_warn instant at 20m irate(http_requests_histogram{path="/e"}[20m]) eval instant at 20m irate(http_requests_histogram{path="/f"}[20m]) - expect no_warn {path="/f"} {{schema:-53 sum:0.01 count:0.01 custom_values:[5 10] buckets:[0.01]}} eval instant at 20m irate(http_requests_histogram{path="/g"}[20m]) - expect no_warn {path="/g"} {{schema:-53 sum:0.01 count:0.01 custom_values:[5 10] buckets:[0.01]}} clear @@ -283,22 +272,18 @@ load 5m http_requests_mix{path="/foo"} 0 50 100 {{schema:0 sum:0 count:0 buckets:[0 0 0] counter_reset_hint:gauge}} {{schema:0 sum:1 count:2 buckets:[1 1 1] counter_reset_hint:gauge}} eval instant at 20m delta(http_requests[20m]) - expect no_warn {path="/foo"} 200 {path="/bar"} -200 eval instant at 20m delta(http_requests_gauge[20m]) - expect no_warn {path="/foo"} {{schema:0 sum:4 count:8 buckets:[4 4 4]}} # delta emits warn annotation for 
non-gauge histogram types. -eval instant at 20m delta(http_requests_counter[20m]) - expect warn +eval_warn instant at 20m delta(http_requests_counter[20m]) {path="/foo"} {{schema:0 sum:4 count:8 buckets:[4 4 4]}} # delta emits warn annotation for mix of histogram and floats. -eval instant at 20m delta(http_requests_mix[20m]) - expect warn +eval_warn instant at 20m delta(http_requests_mix[20m]) #empty clear @@ -317,41 +302,31 @@ load 5m http_requests_histogram{path="/g"} 0 0 {{schema:-53 sum:1 count:1 custom_values:[1] buckets:[2] counter_reset_hint:gauge}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1] counter_reset_hint:gauge}} eval instant at 20m idelta(http_requests[20m]) - expect no_warn {path="/foo"} 50 {path="/bar"} -50 eval range from 0 to 20m step 5m idelta(http_requests_nan[15m1s]) - expect no_warn {} _ NaN NaN NaN 6 eval instant at 20m idelta(http_requests_histogram{path="/a"}[20m]) - expect no_warn {path="/a"} {{sum:1 count:3 counter_reset_hint:gauge}} eval instant at 20m idelta(http_requests_histogram{path="/b"}[20m]) - expect no_warn {path="/b"} {{sum:1 count:1 counter_reset_hint:gauge}} eval instant at 20m idelta(http_requests_histogram{path="/b"}[6m]) - expect no_warn -eval instant at 20m idelta(http_requests_histogram{path="/c"}[20m]) - expect warn +eval_warn instant at 20m idelta(http_requests_histogram{path="/c"}[20m]) {path="/c"} {{sum:1 count:1 counter_reset_hint:gauge}} -eval instant at 20m idelta(http_requests_histogram{path="/d"}[20m]) - expect warn +eval_warn instant at 20m idelta(http_requests_histogram{path="/d"}[20m]) {path="/d"} {{sum:1 count:1 counter_reset_hint:gauge}} -eval instant at 20m idelta(http_requests_histogram{path="/e"}[20m]) - expect warn +eval_warn instant at 20m idelta(http_requests_histogram{path="/e"}[20m]) -eval instant at 20m idelta(http_requests_histogram{path="/f"}[20m]) - expect warn +eval_warn instant at 20m idelta(http_requests_histogram{path="/f"}[20m]) -eval instant at 20m 
idelta(http_requests_histogram{path="/g"}[20m]) - expect warn +eval_warn instant at 20m idelta(http_requests_histogram{path="/g"}[20m]) clear @@ -366,36 +341,28 @@ load 5m # deriv should return the same as rate in simple cases. eval instant at 50m rate(http_requests_total{group="canary", instance="1", job="app-server"}[50m]) - expect no_info {group="canary", instance="1", job="app-server"} 0.26666666666666666 eval instant at 50m deriv(http_requests_total{group="canary", instance="1", job="app-server"}[50m]) - expect no_info {group="canary", instance="1", job="app-server"} 0.26666666666666666 # deriv should return correct result. eval instant at 50m deriv(testcounter_reset_middle_total[100m]) - expect no_info {} 0.010606060606060607 # deriv should ignore histograms in a mixed range of floats and histograms, flagged by an info annotation. -eval instant at 110m deriv(http_requests_mix{group="canary", instance="1", job="app-server"}[110m]) - expect info +eval_info instant at 110m deriv(http_requests_mix{group="canary", instance="1", job="app-server"}[110m]) {group="canary", instance="1", job="app-server"} 0.26666666666666666 -eval instant at 100m deriv(testcounter_reset_middle_mix[110m]) - expect info +eval_info instant at 100m deriv(testcounter_reset_middle_mix[110m]) {} 0.010606060606060607 # deriv should silently ignore ranges consisting only of histograms. eval instant at 50m deriv(http_requests_histogram[60m]) - expect no_info - expect no_warn #empty # deriv should return NaN in case of +Inf or -Inf found. eval instant at 100m deriv(http_requests_inf[100m]) - expect no_info {job="app-server", instance="1", group="canary"} NaN # predict_linear should return correct result. 
@@ -413,45 +380,35 @@ eval instant at 100m deriv(http_requests_inf[100m]) # intercept at t=3000: 38.63636363636364 # intercept at t=3000+3600: 76.81818181818181 eval instant at 50m predict_linear(testcounter_reset_middle_total[50m], 3600) - expect no_info {} 70 eval instant at 50m predict_linear(testcounter_reset_middle_total[50m], 1h) - expect no_info {} 70 # intercept at t = 3000+3600 = 6600 eval instant at 50m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600) - expect no_info {} 76.81818181818181 eval instant at 50m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 1h) - expect no_info {} 76.81818181818181 # intercept at t = 600+3600 = 4200 eval instant at 10m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600) - expect no_info {} 51.36363636363637 # intercept at t = 4200+3600 = 7800 eval instant at 70m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600) - expect no_info {} 89.54545454545455 # predict_linear should ignore histograms in a mixed range of floats and histograms, flagged by an info annotation. -eval instant at 60m predict_linear(testcounter_reset_middle_mix[60m], 3000) - expect info +eval_info instant at 60m predict_linear(testcounter_reset_middle_mix[60m], 3000) {} 70 -eval instant at 60m predict_linear(testcounter_reset_middle_mix[60m], 50m) - expect info +eval_info instant at 60m predict_linear(testcounter_reset_middle_mix[60m], 50m) {} 70 # predict_linear should silently ignore ranges consisting only of histograms. eval instant at 60m predict_linear(http_requests_histogram[60m], 50m) - expect no_info - expect no_warn #empty # predict_linear should return NaN in case of +Inf or -Inf found. @@ -514,16 +471,13 @@ eval instant at 0m label_replace(testmetric, "dst", "", "dst", ".*") testmetric{src="source-value-20"} 1 # label_replace fails when the regex is invalid. 
-eval instant at 0m label_replace(testmetric, "dst", "value-$1", "src", "(.*") - expect fail +eval_fail instant at 0m label_replace(testmetric, "dst", "value-$1", "src", "(.*") # label_replace fails when the destination label name is not a valid Prometheus label name. -eval instant at 0m label_replace(testmetric, "\xff", "", "src", "(.*)") - expect fail +eval_fail instant at 0m label_replace(testmetric, "\xff", "", "src", "(.*)") # label_replace fails when there would be duplicated identical output label sets. -eval instant at 0m label_replace(testmetric, "src", "", "", "") - expect fail +eval_fail instant at 0m label_replace(testmetric, "src", "", "", "") clear @@ -586,8 +540,8 @@ eval instant at 0m label_join(testmetric1, "dst", ", ", "src", "src1", "src2") testmetric1{src="foo",src1="bar",src2="foobar",dst="foo, bar, foobar"} 0 testmetric1{src="fizz",src1="buzz",src2="fizzbuzz",dst="fizz, buzz, fizzbuzz"} 1 -eval instant at 0m label_join(dup, "label", "", "this") - expect fail msg:vector cannot contain metrics with the same labelset +eval_fail instant at 0m label_join(dup, "label", "", "this") + expected_fail_message vector cannot contain metrics with the same labelset clear @@ -698,8 +652,7 @@ load 5m http_requests{job="app-server", instance="1", group="canary"} 0+80x10 http_requests{job="app-server", instance="2", group="canary"} {{schema:0 sum:1 count:1}}x15 -eval instant at 50m sort(http_requests) - expect ordered +eval_ordered instant at 50m sort(http_requests) http_requests{group="production", instance="0", job="api-server"} 100 http_requests{group="production", instance="1", job="api-server"} 200 http_requests{group="canary", instance="0", job="api-server"} 300 @@ -710,8 +663,7 @@ eval instant at 50m sort(http_requests) http_requests{group="canary", instance="1", job="app-server"} 800 http_requests{group="canary", instance="2", job="api-server"} NaN -eval instant at 50m sort_desc(http_requests) - expect ordered +eval_ordered instant at 50m 
sort_desc(http_requests) http_requests{group="canary", instance="1", job="app-server"} 800 http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="production", instance="1", job="app-server"} 600 @@ -749,8 +701,7 @@ load 5m node_uname_info{job="node_exporter", instance="4m5", release="1.11.3"} 0+10x10 node_uname_info{job="node_exporter", instance="4m1000", release="1.111.3"} 0+10x10 -eval instant at 50m sort_by_label(http_requests, "instance") - expect ordered +eval_ordered instant at 50m sort_by_label(http_requests, "instance") http_requests{group="canary", instance="0", job="api-server"} 300 http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="production", instance="0", job="api-server"} 100 @@ -762,8 +713,7 @@ eval instant at 50m sort_by_label(http_requests, "instance") http_requests{group="canary", instance="2", job="api-server"} NaN http_requests{group="production", instance="2", job="api-server"} 100 -eval instant at 50m sort_by_label(http_requests, "instance", "group") - expect ordered +eval_ordered instant at 50m sort_by_label(http_requests, "instance", "group") http_requests{group="canary", instance="0", job="api-server"} 300 http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="production", instance="0", job="api-server"} 100 @@ -775,8 +725,7 @@ eval instant at 50m sort_by_label(http_requests, "instance", "group") http_requests{group="canary", instance="2", job="api-server"} NaN http_requests{group="production", instance="2", job="api-server"} 100 -eval instant at 50m sort_by_label(http_requests, "instance", "group") - expect ordered +eval_ordered instant at 50m sort_by_label(http_requests, "instance", "group") http_requests{group="canary", instance="0", job="api-server"} 300 http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="production", instance="0", job="api-server"} 100 @@ -788,8 +737,7 @@ eval instant at 50m 
sort_by_label(http_requests, "instance", "group") http_requests{group="canary", instance="2", job="api-server"} NaN http_requests{group="production", instance="2", job="api-server"} 100 -eval instant at 50m sort_by_label(http_requests, "group", "instance", "job") - expect ordered +eval_ordered instant at 50m sort_by_label(http_requests, "group", "instance", "job") http_requests{group="canary", instance="0", job="api-server"} 300 http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="canary", instance="1", job="api-server"} 400 @@ -801,8 +749,7 @@ eval instant at 50m sort_by_label(http_requests, "group", "instance", "job") http_requests{group="production", instance="1", job="app-server"} 600 http_requests{group="production", instance="2", job="api-server"} 100 -eval instant at 50m sort_by_label(http_requests, "job", "instance", "group") - expect ordered +eval_ordered instant at 50m sort_by_label(http_requests, "job", "instance", "group") http_requests{group="canary", instance="0", job="api-server"} 300 http_requests{group="production", instance="0", job="api-server"} 100 http_requests{group="canary", instance="1", job="api-server"} 400 @@ -814,8 +761,7 @@ eval instant at 50m sort_by_label(http_requests, "job", "instance", "group") http_requests{group="canary", instance="1", job="app-server"} 800 http_requests{group="production", instance="1", job="app-server"} 600 -eval instant at 50m sort_by_label_desc(http_requests, "instance") - expect ordered +eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance") http_requests{group="production", instance="2", job="api-server"} 100 http_requests{group="canary", instance="2", job="api-server"} NaN http_requests{group="production", instance="1", job="app-server"} 600 @@ -827,8 +773,7 @@ eval instant at 50m sort_by_label_desc(http_requests, "instance") http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="canary", instance="0", job="api-server"} 
300 -eval instant at 50m sort_by_label_desc(http_requests, "instance", "group") - expect ordered +eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance", "group") http_requests{group="production", instance="2", job="api-server"} 100 http_requests{group="canary", instance="2", job="api-server"} NaN http_requests{group="production", instance="1", job="app-server"} 600 @@ -840,8 +785,7 @@ eval instant at 50m sort_by_label_desc(http_requests, "instance", "group") http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="canary", instance="0", job="api-server"} 300 -eval instant at 50m sort_by_label_desc(http_requests, "instance", "group", "job") - expect ordered +eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance", "group", "job") http_requests{group="production", instance="2", job="api-server"} 100 http_requests{group="canary", instance="2", job="api-server"} NaN http_requests{group="production", instance="1", job="app-server"} 600 @@ -853,8 +797,7 @@ eval instant at 50m sort_by_label_desc(http_requests, "instance", "group", "job" http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="canary", instance="0", job="api-server"} 300 -eval instant at 50m sort_by_label(cpu_time_total, "cpu") - expect ordered +eval_ordered instant at 50m sort_by_label(cpu_time_total, "cpu") cpu_time_total{job="cpu", cpu="0"} 100 cpu_time_total{job="cpu", cpu="1"} 100 cpu_time_total{job="cpu", cpu="2"} 100 @@ -866,14 +809,12 @@ eval instant at 50m sort_by_label(cpu_time_total, "cpu") cpu_time_total{job="cpu", cpu="21"} 100 cpu_time_total{job="cpu", cpu="100"} 100 -eval instant at 50m sort_by_label(node_uname_info, "instance") - expect ordered +eval_ordered instant at 50m sort_by_label(node_uname_info, "instance") node_uname_info{job="node_exporter", instance="4m5", release="1.11.3"} 100 node_uname_info{job="node_exporter", instance="4m600", release="1.2.3"} 100 
node_uname_info{job="node_exporter", instance="4m1000", release="1.111.3"} 100 -eval instant at 50m sort_by_label(node_uname_info, "release") - expect ordered +eval_ordered instant at 50m sort_by_label(node_uname_info, "release") node_uname_info{job="node_exporter", instance="4m600", release="1.2.3"} 100 node_uname_info{job="node_exporter", instance="4m5", release="1.11.3"} 100 node_uname_info{job="node_exporter", instance="4m1000", release="1.111.3"} 100 @@ -894,15 +835,13 @@ load 10s http_requests_histogram{job="api-server", instance="1", group="canary"} {{schema:0 count:1 sum:2}}x1000 eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1) - expect no_info {job="api-server", instance="0", group="production"} 8000 {job="api-server", instance="1", group="production"} 16000 {job="api-server", instance="0", group="canary"} 24000 {job="api-server", instance="1", group="canary"} 32000 # double_exponential_smoothing should ignore histograms in a mixed range of floats and histograms, flagged by an info annotation. -eval instant at 20010s double_exponential_smoothing(http_requests_mix[1m], 0.01, 0.1) - expect info +eval_info instant at 20010s double_exponential_smoothing(http_requests_mix[1m], 0.01, 0.1) {job="api-server", instance="0", group="production"} 30100 {job="api-server", instance="1", group="production"} 30200 {job="api-server", instance="0", group="canary"} 80300 @@ -910,7 +849,6 @@ eval instant at 20010s double_exponential_smoothing(http_requests_mix[1m], 0.01, # double_exponential_smoothing should silently ignore ranges consisting only of histograms. eval instant at 10000s double_exponential_smoothing(http_requests_histogram[1m], 0.01, 0.1) - expect no_info #empty # negative trends @@ -1056,12 +994,10 @@ eval instant at 55s sum_over_time(metric11[1m])/count_over_time(metric11[1m]) {} NaN # Tests for samples with mix of floats and histograms. 
-eval instant at 55s sum_over_time(metric12[1m]) - expect warn +eval_warn instant at 55s sum_over_time(metric12[1m]) # no result. -eval instant at 55s avg_over_time(metric12[1m]) - expect warn +eval_warn instant at 55s avg_over_time(metric12[1m]) # no result. # Tests for samples with only histograms. @@ -1248,16 +1184,13 @@ eval instant at 1m stddev_over_time((metric[2m])) eval instant at 1m stddev_over_time(metric_histogram{type="only_histogram"}[2m]) #empty -eval instant at 1m stddev_over_time(metric_histogram{type="mix"}[2m]) - expect info +eval_info instant at 1m stddev_over_time(metric_histogram{type="mix"}[2m]) {type="mix"} 0 eval instant at 1m stdvar_over_time(metric_histogram{type="only_histogram"}[2m]) - expect no_info #empty -eval instant at 1m stdvar_over_time(metric_histogram{type="mix"}[2m]) - expect info +eval_info instant at 1m stdvar_over_time(metric_histogram{type="mix"}[2m]) {type="mix"} 0 # Tests for stddev_over_time and stdvar_over_time #4927. @@ -1279,15 +1212,12 @@ load 10s metric_histogram{type="mix"} 1 1 1 {{schema:1 sum:2 count:3}} {{schema:1 sum:2 count:3}} eval instant at 70s mad_over_time(metric[70s]) - expect no_info {} 1 eval instant at 70s mad_over_time(metric_histogram{type="only_histogram"}[70s]) - expect no_info #empty -eval instant at 70s mad_over_time(metric_histogram{type="mix"}[70s]) - expect info +eval_info instant at 70s mad_over_time(metric_histogram{type="mix"}[70s]) {type="mix"} 0 # Tests for ts_of_max_over_time and ts_of_min_over_time. Using odd scrape interval to test for rounding bugs. 
@@ -1331,69 +1261,49 @@ load 10s data_histogram{test="mix samples"} 0 1 2 {{schema:0 sum:1 count:2}}x2 eval instant at 1m quantile_over_time(0, data[2m]) - expect no_info - expect no_warn {test="two samples"} 0 {test="three samples"} 0 {test="uneven samples"} 0 eval instant at 1m quantile_over_time(0.5, data[2m]) - expect no_info - expect no_warn {test="two samples"} 0.5 {test="three samples"} 1 {test="uneven samples"} 1 eval instant at 1m quantile_over_time(0.75, data[2m]) - expect no_info - expect no_warn {test="two samples"} 0.75 {test="three samples"} 1.5 {test="uneven samples"} 2.5 eval instant at 1m quantile_over_time(0.8, data[2m]) - expect no_info - expect no_warn {test="two samples"} 0.8 {test="three samples"} 1.6 {test="uneven samples"} 2.8 eval instant at 1m quantile_over_time(1, data[2m]) - expect no_info - expect no_warn {test="two samples"} 1 {test="three samples"} 2 {test="uneven samples"} 4 -eval instant at 1m quantile_over_time(-1, data[2m]) - expect no_info - expect warn +eval_warn instant at 1m quantile_over_time(-1, data[2m]) {test="two samples"} -Inf {test="three samples"} -Inf {test="uneven samples"} -Inf -eval instant at 1m quantile_over_time(2, data[2m]) - expect no_info - expect warn +eval_warn instant at 1m quantile_over_time(2, data[2m]) {test="two samples"} +Inf {test="three samples"} +Inf {test="uneven samples"} +Inf -eval instant at 1m (quantile_over_time(2, (data[2m]))) - expect no_info - expect warn +eval_warn instant at 1m (quantile_over_time(2, (data[2m]))) {test="two samples"} +Inf {test="three samples"} +Inf {test="uneven samples"} +Inf eval instant at 1m quantile_over_time(0.5, data_histogram{test="only histogram samples"}[2m]) - expect no_info - expect no_warn #empty -eval instant at 1m quantile_over_time(0.5, data_histogram{test="mix samples"}[2m]) - expect info - expect no_warn +eval_info instant at 1m quantile_over_time(0.5, data_histogram{test="mix samples"}[2m]) {test="mix samples"} 1 clear @@ -1507,8 +1417,7 @@ load 5m 
testmetric1{src="a",dst="b"} 0 testmetric2{src="a",dst="b"} 1 -eval instant at 0m changes({__name__=~'testmetric1|testmetric2'}[5m]) - expect fail +eval_fail instant at 0m changes({__name__=~'testmetric1|testmetric2'}[5m]) clear @@ -1523,7 +1432,6 @@ load 10s data_histogram{type="mix_samples"} 0 1 {{schema:0 sum:1 count:2}} {{schema:0 sum:2 count:3}} eval instant at 1m min_over_time(data[2m]) - expect no_info {type="numbers"} 0 {type="some_nan"} 0 {type="some_nan2"} 1 @@ -1531,15 +1439,12 @@ eval instant at 1m min_over_time(data[2m]) {type="only_nan"} NaN eval instant at 1m min_over_time(data_histogram{type="only_histogram"}[2m]) - expect no_info #empty -eval instant at 1m min_over_time(data_histogram{type="mix_samples"}[2m]) - expect info +eval_info instant at 1m min_over_time(data_histogram{type="mix_samples"}[2m]) {type="mix_samples"} 0 eval instant at 1m max_over_time(data[2m]) - expect no_info {type="numbers"} 3 {type="some_nan"} 2 {type="some_nan2"} 2 @@ -1547,15 +1452,12 @@ eval instant at 1m max_over_time(data[2m]) {type="only_nan"} NaN eval instant at 1m max_over_time(data_histogram{type="only_histogram"}[2m]) - expect no_info #empty -eval instant at 1m max_over_time(data_histogram{type="mix_samples"}[2m]) - expect info +eval_info instant at 1m max_over_time(data_histogram{type="mix_samples"}[2m]) {type="mix_samples"} 1 eval instant at 1m last_over_time({__name__=~"data(_histogram)?"}[2m]) - expect no_info data{type="numbers"} 3 data{type="some_nan"} NaN data{type="some_nan2"} 1 @@ -1565,7 +1467,6 @@ eval instant at 1m last_over_time({__name__=~"data(_histogram)?"}[2m]) data_histogram{type="mix_samples"} {{schema:0 sum:2 count:3}} eval instant at 1m count_over_time({__name__=~"data(_histogram)?"}[2m]) - expect no_info {type="numbers"} 3 {type="some_nan"} 3 {type="some_nan2"} 3 diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test 
index 84a467a3145..bf13864277d 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test @@ -70,91 +70,74 @@ load_with_nhcb 5m # Test histogram_count. eval instant at 50m histogram_count(testhistogram3) - expect no_warn {start="positive"} 110 {start="negative"} 20 # Classic way of accessing the count still works. eval instant at 50m testhistogram3_count - expect no_warn testhistogram3_count{start="positive"} 110 testhistogram3_count{start="negative"} 20 # Test histogram_sum. eval instant at 50m histogram_sum(testhistogram3) - expect no_warn {start="positive"} 330 {start="negative"} 80 # Classic way of accessing the sum still works. eval instant at 50m testhistogram3_sum - expect no_warn testhistogram3_sum{start="positive"} 330 testhistogram3_sum{start="negative"} 80 # Test histogram_avg. This has no classic equivalent. eval instant at 50m histogram_avg(testhistogram3) - expect no_warn {start="positive"} 3 {start="negative"} 4 # Test histogram_stddev. This has no classic equivalent. eval instant at 50m histogram_stddev(testhistogram3) - expect no_warn {start="positive"} 2.7435461458749795 {start="negative"} 4.187667907081458 # Test histogram_stdvar. This has no classic equivalent. eval instant at 50m histogram_stdvar(testhistogram3) - expect no_warn {start="positive"} 7.527045454545455 {start="negative"} 17.5365625 # Test histogram_fraction. 
# eval instant at 50m histogram_fraction(0, 4, testhistogram2) - expect no_warn {} 0.6666666666666666 eval instant at 50m histogram_fraction(0, 4, testhistogram2_bucket) - expect no_warn {} 0.6666666666666666 eval instant at 50m histogram_fraction(0, 6, testhistogram2) - expect no_warn {} 1 eval instant at 50m histogram_fraction(0, 6, testhistogram2_bucket) - expect no_warn {} 1 eval instant at 50m histogram_fraction(0, 3.5, testhistogram2) - expect no_warn {} 0.5833333333333334 eval instant at 50m histogram_fraction(0, 3.5, testhistogram2_bucket) - expect no_warn {} 0.5833333333333334 eval instant at 50m histogram_fraction(0, 0.2, testhistogram3) - expect no_warn {start="positive"} 0.6363636363636364 {start="negative"} 0 eval instant at 50m histogram_fraction(0, 0.2, testhistogram3_bucket) - expect no_warn {start="positive"} 0.6363636363636364 {start="negative"} 0 eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[10m])) - expect no_warn {start="positive"} 0.6363636363636364 {start="negative"} 0 eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3_bucket[10m])) - expect no_warn {start="positive"} 0.6363636363636364 {start="negative"} 0 @@ -162,98 +145,80 @@ eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3_bucket[10m])) # it exists) and divide by the count to get the same result. eval instant at 50m testhistogram3_bucket{le=".2"} / ignoring(le) testhistogram3_count - expect no_warn {start="positive"} 0.6363636363636364 eval instant at 50m rate(testhistogram3_bucket{le=".2"}[10m]) / ignoring(le) rate(testhistogram3_count[10m]) - expect no_warn {start="positive"} 0.6363636363636364 # Test histogram_quantile, native and classic. 
eval instant at 50m histogram_quantile(0, testhistogram3) - expect no_warn {start="positive"} 0 {start="negative"} -0.25 eval instant at 50m histogram_quantile(0, testhistogram3_bucket) - expect no_warn {start="positive"} 0 {start="negative"} -0.25 eval instant at 50m histogram_quantile(0.25, testhistogram3) - expect no_warn {start="positive"} 0.055 {start="negative"} -0.225 eval instant at 50m histogram_quantile(0.25, testhistogram3_bucket) - expect no_warn {start="positive"} 0.055 {start="negative"} -0.225 eval instant at 50m histogram_quantile(0.5, testhistogram3) - expect no_warn {start="positive"} 0.125 {start="negative"} -0.2 eval instant at 50m histogram_quantile(0.5, testhistogram3_bucket) - expect no_warn {start="positive"} 0.125 {start="negative"} -0.2 eval instant at 50m histogram_quantile(0.75, testhistogram3) - expect no_warn {start="positive"} 0.45 {start="negative"} -0.15 eval instant at 50m histogram_quantile(0.75, testhistogram3_bucket) - expect no_warn {start="positive"} 0.45 {start="negative"} -0.15 eval instant at 50m histogram_quantile(1, testhistogram3) - expect no_warn {start="positive"} 1 {start="negative"} -0.1 eval instant at 50m histogram_quantile(1, testhistogram3_bucket) - expect no_warn {start="positive"} 1 {start="negative"} -0.1 # Quantile too low. -eval instant at 50m histogram_quantile(-0.1, testhistogram) - expect warn +eval_warn instant at 50m histogram_quantile(-0.1, testhistogram) {start="positive"} -Inf {start="negative"} -Inf -eval instant at 50m histogram_quantile(-0.1, testhistogram_bucket) - expect warn +eval_warn instant at 50m histogram_quantile(-0.1, testhistogram_bucket) {start="positive"} -Inf {start="negative"} -Inf # Quantile too high. 
-eval instant at 50m histogram_quantile(1.01, testhistogram) - expect warn +eval_warn instant at 50m histogram_quantile(1.01, testhistogram) {start="positive"} +Inf {start="negative"} +Inf -eval instant at 50m histogram_quantile(1.01, testhistogram_bucket) - expect warn +eval_warn instant at 50m histogram_quantile(1.01, testhistogram_bucket) {start="positive"} +Inf {start="negative"} +Inf # Quantile invalid. -eval instant at 50m histogram_quantile(NaN, testhistogram) - expect warn +eval_warn instant at 50m histogram_quantile(NaN, testhistogram) {start="positive"} NaN {start="negative"} NaN -eval instant at 50m histogram_quantile(NaN, testhistogram_bucket) - expect warn +eval_warn instant at 50m histogram_quantile(NaN, testhistogram_bucket) {start="positive"} NaN {start="negative"} NaN @@ -263,244 +228,196 @@ eval instant at 50m histogram_quantile(NaN, non_existent) # Quantile value in lowest bucket. eval instant at 50m histogram_quantile(0, testhistogram) - expect no_warn {start="positive"} 0 {start="negative"} -0.2 eval instant at 50m histogram_quantile(0, testhistogram_bucket) - expect no_warn {start="positive"} 0 {start="negative"} -0.2 # Quantile value in highest bucket. eval instant at 50m histogram_quantile(1, testhistogram) - expect no_warn {start="positive"} 1 {start="negative"} 0.3 eval instant at 50m histogram_quantile(1, testhistogram_bucket) - expect no_warn {start="positive"} 1 {start="negative"} 0.3 # Finally some useful quantiles. 
eval instant at 50m histogram_quantile(0.2, testhistogram) - expect no_warn {start="positive"} 0.048 {start="negative"} -0.2 eval instant at 50m histogram_quantile(0.2, testhistogram_bucket) - expect no_warn {start="positive"} 0.048 {start="negative"} -0.2 eval instant at 50m histogram_quantile(0.5, testhistogram) - expect no_warn {start="positive"} 0.15 {start="negative"} -0.15 eval instant at 50m histogram_quantile(0.5, testhistogram_bucket) - expect no_warn {start="positive"} 0.15 {start="negative"} -0.15 eval instant at 50m histogram_quantile(0.8, testhistogram) - expect no_warn {start="positive"} 0.72 {start="negative"} 0.3 eval instant at 50m histogram_quantile(0.8, testhistogram_bucket) - expect no_warn {start="positive"} 0.72 {start="negative"} 0.3 # More realistic with rates. eval instant at 50m histogram_quantile(0.2, rate(testhistogram[10m])) - expect no_warn {start="positive"} 0.048 {start="negative"} -0.2 eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[10m])) - expect no_warn {start="positive"} 0.048 {start="negative"} -0.2 eval instant at 50m histogram_quantile(0.5, rate(testhistogram[10m])) - expect no_warn {start="positive"} 0.15 {start="negative"} -0.15 eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[10m])) - expect no_warn {start="positive"} 0.15 {start="negative"} -0.15 eval instant at 50m histogram_quantile(0.8, rate(testhistogram[10m])) - expect no_warn {start="positive"} 0.72 {start="negative"} 0.3 eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[10m])) - expect no_warn {start="positive"} 0.72 {start="negative"} 0.3 # Want results exactly in the middle of the bucket. 
eval instant at 7m histogram_quantile(1./6., testhistogram2) - expect no_warn {} 1 eval instant at 7m histogram_quantile(1./6., testhistogram2_bucket) - expect no_warn {} 1 eval instant at 7m histogram_quantile(0.5, testhistogram2) - expect no_warn {} 3 eval instant at 7m histogram_quantile(0.5, testhistogram2_bucket) - expect no_warn {} 3 eval instant at 7m histogram_quantile(5./6., testhistogram2) - expect no_warn {} 5 eval instant at 7m histogram_quantile(5./6., testhistogram2_bucket) - expect no_warn {} 5 eval instant at 47m histogram_quantile(1./6., rate(testhistogram2[15m])) - expect no_warn {} 1 eval instant at 47m histogram_quantile(1./6., rate(testhistogram2_bucket[15m])) - expect no_warn {} 1 eval instant at 47m histogram_quantile(0.5, rate(testhistogram2[15m])) - expect no_warn {} 3 eval instant at 47m histogram_quantile(0.5, rate(testhistogram2_bucket[15m])) - expect no_warn {} 3 eval instant at 47m histogram_quantile(5./6., rate(testhistogram2[15m])) - expect no_warn {} 5 eval instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m])) - expect no_warn {} 5 # Aggregated histogram: Everything in one. Note how native histograms # don't require aggregation by le. eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m]))) - expect no_warn {} 0.075 eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le)) - expect no_warn {} 0.075 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m]))) - expect no_warn {} 0.1277777777777778 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le)) - expect no_warn {} 0.1277777777777778 # Aggregated histogram: Everything in one. Now with avg, which does not change anything. 
eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds[10m]))) - expect no_warn {} 0.075 eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[10m])) by (le)) - expect no_warn {} 0.075 eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds[10m]))) - expect no_warn {} 0.12777777777777778 eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[10m])) by (le)) - expect no_warn {} 0.12777777777777778 # Aggregated histogram: By instance. eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (instance)) - expect no_warn {instance="ins1"} 0.075 {instance="ins2"} 0.075 eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance)) - expect no_warn {instance="ins1"} 0.075 {instance="ins2"} 0.075 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (instance)) - expect no_warn {instance="ins1"} 0.1333333333 {instance="ins2"} 0.125 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance)) - expect no_warn {instance="ins1"} 0.1333333333 {instance="ins2"} 0.125 # Aggregated histogram: By job. eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job)) - expect no_warn {job="job1"} 0.1 {job="job2"} 0.0642857142857143 eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job)) - expect no_warn {job="job1"} 0.1 {job="job2"} 0.0642857142857143 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job)) - expect no_warn {job="job1"} 0.14 {job="job2"} 0.1125 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job)) - expect no_warn {job="job1"} 0.14 {job="job2"} 0.1125 # Aggregated histogram: By job and instance. 
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job, instance)) - expect no_warn {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance)) - expect no_warn {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job, instance)) - expect no_warn {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.1333333333333333 {instance="ins1", job="job2"} 0.1 {instance="ins2", job="job2"} 0.1166666666666667 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance)) - expect no_warn {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.1333333333333333 {instance="ins1", job="job2"} 0.1 @@ -508,28 +425,24 @@ eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bu # The unaggregated histogram for comparison. Same result as the previous one. 
eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds[10m])) - expect no_warn {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[10m])) - expect no_warn {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds[10m])) - expect no_warn {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.13333333333333333 {instance="ins1", job="job2"} 0.1 {instance="ins2", job="job2"} 0.11666666666666667 eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[10m])) - expect no_warn {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.13333333333333333 {instance="ins1", job="job2"} 0.1 @@ -537,32 +450,25 @@ eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket # All NHCBs summed into one. eval instant at 50m sum(request_duration_seconds) - expect no_warn {} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}} eval instant at 50m sum(request_duration_seconds{job="job1",instance="ins1"} + ignoring(job,instance) request_duration_seconds{job="job1",instance="ins2"} + ignoring(job,instance) request_duration_seconds{job="job2",instance="ins1"} + ignoring(job,instance) request_duration_seconds{job="job2",instance="ins2"}) - expect no_warn {} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}} eval instant at 50m avg(request_duration_seconds) - expect no_warn {} {{schema:-53 count:62.5 custom_values:[0.1 0.2] buckets:[25 22.5 15]}} # To verify the result above, calculate from classic histogram as well. 
eval instant at 50m avg (request_duration_seconds_bucket{le="0.1"}) - expect no_warn {} 25 eval instant at 50m avg (request_duration_seconds_bucket{le="0.2"}) - avg (request_duration_seconds_bucket{le="0.1"}) - expect no_warn {} 22.5 eval instant at 50m avg (request_duration_seconds_bucket{le="+Inf"}) - avg (request_duration_seconds_bucket{le="0.2"}) - expect no_warn {} 15 eval instant at 50m count(request_duration_seconds) - expect no_warn {} 4 # A histogram with nonmonotonic bucket counts. This may happen when recording @@ -578,16 +484,13 @@ load 5m nonmonotonic_bucket{le="+Inf"} 0+8x10 # Nonmonotonic buckets, triggering an info annotation. -eval instant at 50m histogram_quantile(0.01, nonmonotonic_bucket) - expect info +eval_info instant at 50m histogram_quantile(0.01, nonmonotonic_bucket) {} 0.0045 -eval instant at 50m histogram_quantile(0.5, nonmonotonic_bucket) - expect info +eval_info instant at 50m histogram_quantile(0.5, nonmonotonic_bucket) {} 8.5 -eval instant at 50m histogram_quantile(0.99, nonmonotonic_bucket) - expect info +eval_info instant at 50m histogram_quantile(0.99, nonmonotonic_bucket) {} 979.75 # Buckets with different representations of the same upper bound. @@ -622,11 +525,9 @@ load_with_nhcb 5m request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10 request_duration_seconds2_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10 -eval instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket"}) - expect fail +eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket"}) -eval instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*"}) - expect fail +eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*"}) # Histogram with constant buckets. 
load_with_nhcb 1m diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/limit.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/limit.test index 3af8d3b364e..484760cc85c 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/limit.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/limit.test @@ -15,14 +15,10 @@ load 5m bar 0 1 0 -1 0 1 0 -1 0 1 0 eval instant at 50m count(limitk by (group) (0, http_requests)) - expect no_info - expect no_warn - # empty +# empty eval instant at 50m count(limitk by (group) (-1, http_requests)) - expect no_info - expect no_warn - # empty +# empty # Exercise k==1 special case (as sample is added before the main series loop). eval instant at 50m count(limitk by (group) (1, http_requests) and http_requests) diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test index 48cdb9ba4e9..9af45a73240 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test @@ -73,8 +73,7 @@ eval instant at 10m sum by (__name__, env) (metric_total{env="1"}) # Aggregation operators by __name__ lead to duplicate labelset errors (aggregation is partitioned by not yet removed __name__ label). # This is an accidental side effect of delayed __name__ label dropping -eval instant at 10m sum by (__name__) (rate({env="1"}[10m])) - expect fail +eval_fail instant at 10m sum by (__name__) (rate({env="1"}[10m])) # Aggregation operators aggregate metrics with same labelset and to-be-dropped names. 
# This is an accidental side effect of delayed __name__ label dropping diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test index e38e003b3f4..c4ffcba034e 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test @@ -398,44 +398,35 @@ clear load 10m histogram_quantile_1 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1 -eval instant at 10m histogram_quantile(1.001, histogram_quantile_1) - expect warn +eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_1) {} Inf eval instant at 10m histogram_quantile(1, histogram_quantile_1) - expect no_warn {} 16 # The following quantiles are within a bucket. Exponential # interpolation is applied (rather than linear, as it is done for # classic histograms), leading to slightly different quantile values. eval instant at 10m histogram_quantile(0.99, histogram_quantile_1) - expect no_warn {} 15.67072476139083 eval instant at 10m histogram_quantile(0.9, histogram_quantile_1) - expect no_warn {} 12.99603834169977 eval instant at 10m histogram_quantile(0.6, histogram_quantile_1) - expect no_warn {} 4.594793419988138 eval instant at 10m histogram_quantile(0.5, histogram_quantile_1) - expect no_warn {} 1.5874010519681994 # Linear interpolation within the zero bucket after all. 
eval instant at 10m histogram_quantile(0.1, histogram_quantile_1) - expect no_warn {} 0.0006 eval instant at 10m histogram_quantile(0, histogram_quantile_1) - expect no_warn {} 0 -eval instant at 10m histogram_quantile(-1, histogram_quantile_1) - expect warn +eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_1) {} -Inf clear @@ -444,39 +435,31 @@ clear load 10m histogram_quantile_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1 -eval instant at 10m histogram_quantile(1.001, histogram_quantile_2) - expect warn +eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_2) {} Inf eval instant at 10m histogram_quantile(1, histogram_quantile_2) - expect no_warn {} 0 # Again, the quantile values here are slightly different from what # they would be with linear interpolation. Note that quantiles # ending up in the zero bucket are linearly interpolated after all. eval instant at 10m histogram_quantile(0.99, histogram_quantile_2) - expect no_warn {} -0.00006 eval instant at 10m histogram_quantile(0.9, histogram_quantile_2) - expect no_warn {} -0.0006 eval instant at 10m histogram_quantile(0.5, histogram_quantile_2) - expect no_warn {} -1.5874010519681996 eval instant at 10m histogram_quantile(0.1, histogram_quantile_2) - expect no_warn {} -12.996038341699768 eval instant at 10m histogram_quantile(0, histogram_quantile_2) - expect no_warn {} -16 -eval instant at 10m histogram_quantile(-1, histogram_quantile_2) - expect warn +eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_2) {} -Inf clear @@ -487,59 +470,46 @@ clear load 10m histogram_quantile_3 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 -eval instant at 10m histogram_quantile(1.001, histogram_quantile_3) - expect warn +eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_3) {} Inf eval instant at 10m histogram_quantile(1, histogram_quantile_3) - expect no_warn {} 16 
eval instant at 10m histogram_quantile(0.99, histogram_quantile_3) - expect no_warn {} 15.34822590920423 eval instant at 10m histogram_quantile(0.9, histogram_quantile_3) - expect no_warn {} 10.556063286183155 eval instant at 10m histogram_quantile(0.7, histogram_quantile_3) - expect no_warn {} 1.2030250360821164 # Linear interpolation in the zero bucket, symmetrically centered around # the zero point. eval instant at 10m histogram_quantile(0.55, histogram_quantile_3) - expect no_warn {} 0.0006 eval instant at 10m histogram_quantile(0.5, histogram_quantile_3) - expect no_warn {} 0 eval instant at 10m histogram_quantile(0.45, histogram_quantile_3) - expect no_warn {} -0.0006 # Finally negative buckets with mirrored exponential interpolation. eval instant at 10m histogram_quantile(0.3, histogram_quantile_3) - expect no_warn {} -1.2030250360821169 eval instant at 10m histogram_quantile(0.1, histogram_quantile_3) - expect no_warn {} -10.556063286183155 eval instant at 10m histogram_quantile(0.01, histogram_quantile_3) - expect no_warn {} -15.34822590920423 eval instant at 10m histogram_quantile(0, histogram_quantile_3) - expect no_warn {} -16 -eval instant at 10m histogram_quantile(-1, histogram_quantile_3) - expect warn +eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_3) {} -Inf clear @@ -939,84 +909,63 @@ load 10m float_series_0 0+0x1 eval instant at 10m histogram_mul_div*3 - expect no_info {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}} eval instant at 10m histogram_mul_div*-1 - expect no_info {} {{schema:0 count:-30 sum:-33 z_bucket:-3 z_bucket_w:0.001 buckets:[-3 -3 -3] n_buckets:[-6 -6 -6]}} eval instant at 10m -histogram_mul_div - expect no_info {} {{schema:0 count:-30 sum:-33 z_bucket:-3 z_bucket_w:0.001 buckets:[-3 -3 -3] n_buckets:[-6 -6 -6]}} eval instant at 10m histogram_mul_div*-3 - expect no_info {} {{schema:0 count:-90 sum:-99 z_bucket:-9 z_bucket_w:0.001 buckets:[-9 -9 -9] 
n_buckets:[-18 -18 -18]}} eval instant at 10m 3*histogram_mul_div - expect no_info {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}} eval instant at 10m histogram_mul_div*float_series_3 - expect no_info {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}} eval instant at 10m float_series_3*histogram_mul_div - expect no_info {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}} eval instant at 10m histogram_mul_div/3 - expect no_info {} {{schema:0 count:10 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[2 2 2]}} eval instant at 10m histogram_mul_div/-3 - expect no_info {} {{schema:0 count:-10 sum:-11 z_bucket:-1 z_bucket_w:0.001 buckets:[-1 -1 -1] n_buckets:[-2 -2 -2]}} eval instant at 10m histogram_mul_div/float_series_3 - expect no_info {} {{schema:0 count:10 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[2 2 2]}} eval instant at 10m histogram_mul_div*0 - expect no_info {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}} eval instant at 10m 0*histogram_mul_div - expect no_info {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}} eval instant at 10m histogram_mul_div*float_series_0 - expect no_info {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}} eval instant at 10m float_series_0*histogram_mul_div - expect no_info {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}} eval instant at 10m histogram_mul_div/0 - expect no_info {} {{schema:0 count:Inf sum:Inf z_bucket_w:0.001 z_bucket:Inf}} eval instant at 10m histogram_mul_div/float_series_0 - expect no_info {} {{schema:0 count:Inf sum:Inf z_bucket_w:0.001 z_bucket:Inf}} eval instant at 10m histogram_mul_div*0/0 - expect no_info {} {{schema:0 count:NaN sum:NaN z_bucket_w:0.001 z_bucket:NaN}} -eval 
instant at 10m histogram_mul_div*histogram_mul_div - expect info +eval_info instant at 10m histogram_mul_div*histogram_mul_div -eval instant at 10m histogram_mul_div/histogram_mul_div - expect info +eval_info instant at 10m histogram_mul_div/histogram_mul_div -eval instant at 10m float_series_3/histogram_mul_div - expect info +eval_info instant at 10m float_series_3/histogram_mul_div -eval instant at 10m 0/histogram_mul_div - expect info +eval_info instant at 10m 0/histogram_mul_div clear @@ -1027,17 +976,13 @@ load 10m histogram_sample {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 float_sample 0x1 -eval instant at 10m float_sample+histogram_sample - expect info +eval_info instant at 10m float_sample+histogram_sample -eval instant at 10m histogram_sample+float_sample - expect info +eval_info instant at 10m histogram_sample+float_sample -eval instant at 10m float_sample-histogram_sample - expect info +eval_info instant at 10m float_sample-histogram_sample -eval instant at 10m histogram_sample-float_sample - expect info +eval_info instant at 10m histogram_sample-float_sample # Counter reset only noticeable in a single bucket. 
load 5m @@ -1075,13 +1020,11 @@ load 30s some_metric {{schema:0 sum:1 count:1 buckets:[1] counter_reset_hint:gauge}} {{schema:0 sum:2 count:2 buckets:[2] counter_reset_hint:gauge}} {{schema:0 sum:3 count:3 buckets:[3] counter_reset_hint:gauge}} # Test the case where we only have two points for rate -eval instant at 30s rate(some_metric[1m]) - expect warn +eval_warn instant at 30s rate(some_metric[1m]) {} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}} # Test the case where we have more than two points for rate -eval instant at 1m rate(some_metric[1m30s]) - expect warn +eval_warn instant at 1m rate(some_metric[1m30s]) {} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}} clear @@ -1091,24 +1034,18 @@ load 30s some_metric {{schema:0 sum:1 count:1 buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} # Start and end with exponential, with custom in the middle. -eval instant at 1m rate(some_metric[1m30s]) - expect warn +eval_warn instant at 1m rate(some_metric[1m30s]) # Should produce no results. # Start and end with custom, with exponential in the middle. -eval instant at 1m30s rate(some_metric[1m30s]) - expect warn +eval_warn instant at 1m30s rate(some_metric[1m30s]) # Should produce no results. -# Start with custom, end with exponential. Return the exponential histogram divided by 48. -# (The 1st sample is the NHCB with count:1. It is mostly ignored with the exception of the -# count, which means the rate calculation extrapolates until the count hits 0.) +# Start with custom, end with exponential. Return the exponential histogram divided by 30. 
eval instant at 1m rate(some_metric[1m]) - {} {{count:0.08333333333333333 sum:0.10416666666666666 counter_reset_hint:gauge buckets:[0.020833333333333332 0.041666666666666664 0.020833333333333332]}} + {} {{schema:0 sum:0.16666666666666666 count:0.13333333333333333 buckets:[0.03333333333333333 0.06666666666666667 0.03333333333333333]}} # Start with exponential, end with custom. Return the custom buckets histogram divided by 30. -# (With the 2nd sample having a count of 1, the extrapolation to zero lands exactly at the -# left boundary of the range, so no extrapolation limitation needed.) eval instant at 30s rate(some_metric[1m]) {} {{schema:-53 sum:0.03333333333333333 count:0.03333333333333333 custom_values:[5 10] buckets:[0.03333333333333333]}} @@ -1170,12 +1107,10 @@ load 6m # T=0: only exponential # T=6: only custom # T=12: mixed, should be ignored and emit a warning -eval range from 0 to 12m step 6m sum(metric) - expect warn +eval_warn range from 0 to 12m step 6m sum(metric) {} {{sum:7 count:5 buckets:[2 3 2]}} {{schema:-53 sum:16 count:3 custom_values:[5 10] buckets:[1 2]}} _ -eval range from 0 to 12m step 6m avg(metric) - expect warn +eval_warn range from 0 to 12m step 6m avg(metric) {} {{sum:3.5 count:2.5 buckets:[1 1.5 1]}} {{schema:-53 sum:8 count:1.5 custom_values:[5 10] buckets:[0.5 1]}} _ clear @@ -1189,12 +1124,10 @@ load 6m # T=0: incompatible, should be ignored and emit a warning # T=6: compatible # T=12: incompatible followed by compatible, should be ignored and emit a warning -eval range from 0 to 12m step 6m sum(metric) - expect warn +eval_warn range from 0 to 12m step 6m sum(metric) {} _ {{schema:-53 sum:2 count:2 custom_values:[5 10] buckets:[2]}} _ -eval range from 0 to 12m step 6m avg(metric) - expect warn +eval_warn range from 0 to 12m step 6m avg(metric) {} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} _ # Test incompatible schemas with additional aggregation operators @@ -1226,11 +1159,9 @@ eval range from 0 to 12m step 6m 
metric{series="1"} or ignoring(series) metric{s metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ _ # Test incompatible schemas with arithmetic binary operators -eval range from 0 to 12m step 6m metric{series="2"} + ignoring (series) metric{series="3"} - expect warn +eval_warn range from 0 to 12m step 6m metric{series="2"} + ignoring (series) metric{series="3"} -eval range from 0 to 12m step 6m metric{series="2"} - ignoring (series) metric{series="3"} - expect warn +eval_warn range from 0 to 12m step 6m metric{series="2"} - ignoring (series) metric{series="3"} clear @@ -1240,15 +1171,12 @@ load 6m metric2 {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} eval range from 0 to 6m step 6m metric1 == metric2 - expect no_info - metric1{} _ {{schema:-53 count:1 sum:1 custom_values:[5 10] buckets:[1]}} +metric1{} _ {{schema:-53 count:1 sum:1 custom_values:[5 10] buckets:[1]}} eval range from 0 to 6m step 6m metric1 != metric2 - expect no_info - metric1{} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ +metric1{} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ -eval range from 0 to 6m step 6m metric2 > metric2 - expect info +eval_info range from 0 to 6m step 6m metric2 > metric2 clear @@ -1258,82 +1186,62 @@ load 6m # If evaluating at 12m, the first two NHCBs have the same custom values # while the 3rd one has different ones. 
-eval instant at 12m sum_over_time(nhcb_metric[13m]) - expect warn +eval_warn instant at 12m sum_over_time(nhcb_metric[13m]) -eval instant at 12m avg_over_time(nhcb_metric[13m]) - expect warn +eval_warn instant at 12m avg_over_time(nhcb_metric[13m]) eval instant at 12m last_over_time(nhcb_metric[13m]) - expect no_warn - nhcb_metric{} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} +nhcb_metric{} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} eval instant at 12m count_over_time(nhcb_metric[13m]) - expect no_warn - {} 3 +{} 3 eval instant at 12m present_over_time(nhcb_metric[13m]) - expect no_warn - {} 1 +{} 1 eval instant at 12m changes(nhcb_metric[13m]) - expect no_warn - {} 1 +{} 1 -eval instant at 12m delta(nhcb_metric[13m]) - expect warn +eval_warn instant at 12m delta(nhcb_metric[13m]) -eval instant at 12m increase(nhcb_metric[13m]) - expect warn +eval_warn instant at 12m increase(nhcb_metric[13m]) -eval instant at 12m rate(nhcb_metric[13m]) - expect warn +eval_warn instant at 12m rate(nhcb_metric[13m]) eval instant at 12m resets(nhcb_metric[13m]) - expect no_warn - {} 1 +{} 1 # Now doing the same again, but at 18m, where the first NHCB has # different custom_values compared to the other two. This now # works with no warning for increase() and rate(). No change # otherwise. 
-eval instant at 18m sum_over_time(nhcb_metric[13m]) - expect warn +eval_warn instant at 18m sum_over_time(nhcb_metric[13m]) -eval instant at 18m avg_over_time(nhcb_metric[13m]) - expect warn +eval_warn instant at 18m avg_over_time(nhcb_metric[13m]) eval instant at 18m last_over_time(nhcb_metric[13m]) - expect no_warn - nhcb_metric{} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} +nhcb_metric{} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} eval instant at 18m count_over_time(nhcb_metric[13m]) - expect no_warn - {} 3 +{} 3 eval instant at 18m present_over_time(nhcb_metric[13m]) - expect no_warn - {} 1 +{} 1 eval instant at 18m changes(nhcb_metric[13m]) - expect no_warn - {} 1 +{} 1 -eval instant at 18m delta(nhcb_metric[13m]) - expect warn +eval_warn instant at 18m delta(nhcb_metric[13m]) eval instant at 18m increase(nhcb_metric[13m]) - expect no_warn - {} {{schema:-53 count:1.0833333333333333 sum:1.0833333333333333 custom_values:[5 10] buckets:[1.0833333333333333]}} +{} {{schema:-53 count:1.0833333333333333 sum:1.0833333333333333 custom_values:[5 10] buckets:[1.0833333333333333]}} eval instant at 18m rate(nhcb_metric[13m]) - expect no_warn - {} {{schema:-53 count:0.0013888888888888887 sum:0.0013888888888888887 custom_values:[5 10] buckets:[0.0013888888888888887]}} +{} {{schema:-53 count:0.0013888888888888887 sum:0.0013888888888888887 custom_values:[5 10] buckets:[0.0013888888888888887]}} eval instant at 18m resets(nhcb_metric[13m]) - expect no_warn - {} 1 +{} 1 clear @@ -1351,8 +1259,7 @@ load 1m metric{group="incompatible-custom-histograms", series="1"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} metric{group="incompatible-custom-histograms", series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} -eval instant at 0 sum by (group) (metric) - expect warn +eval_warn instant at 0 sum by (group) (metric) {group="just-floats"} 5 {group="just-exponential-histograms"} {{sum:5 count:7 buckets:[2 3 2]}} 
{group="just-custom-histograms"} {{schema:-53 sum:4 count:5 custom_values:[2] buckets:[8]}} @@ -1368,22 +1275,17 @@ load 10m histogram_sum_float{idx="0"} 42.0x1 eval instant at 10m sum(histogram_sum) - expect no_warn {} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}} -eval instant at 10m sum({idx="0"}) - expect warn +eval_warn instant at 10m sum({idx="0"}) eval instant at 10m sum(histogram_sum{idx="0"} + ignoring(idx) histogram_sum{idx="1"} + ignoring(idx) histogram_sum{idx="2"} + ignoring(idx) histogram_sum{idx="3"}) - expect no_warn {} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}} eval instant at 10m count(histogram_sum) - expect no_warn {} 4 eval instant at 10m avg(histogram_sum) - expect no_warn {} {{schema:0 count:26.75 sum:1172.8 z_bucket:3.5 z_bucket_w:0.001 buckets:[0.75 2 0.5 1.25 0.75 0.5 0.5] n_buckets:[0.5 1.5 2 1 3.75 2.25 0 0 0 2.5 2.5 1]}} clear @@ -1474,25 +1376,21 @@ eval instant at 1m histogram_fraction(-Inf, +Inf, histogram_nan) clear -# Tests to demonstrate how an extrapolation below zero is prevented for both float counters and native counter histograms. -# Note that the float counter behaves the same as the histogram count after `increase`. +# Tests to demonstrate how an extrapolation below zero is prevented for a float counter, but not for native histograms. +# I.e. the float counter that behaves the same as the histogram count might yield a different result after `increase`. load 1m metric{type="histogram"} {{schema:0 count:15 sum:25 buckets:[5 10]}} {{schema:0 count:2490 sum:75 buckets:[15 2475]}}x55 metric{type="counter"} 15 2490x55 # End of range coincides with sample. Zero point of count is reached within the range. -# Note that the 2nd bucket has an exaggerated increase of 2479.939393939394 (although -# it has a value of only 2475 at the end of the range). 
eval instant at 55m increase(metric[90m]) - {type="histogram"} {{count:2490 sum:50.303030303030305 counter_reset_hint:gauge buckets:[10.06060606060606 2479.939393939394]}} + {type="histogram"} {{count:2497.5 sum:50.45454545454545 counter_reset_hint:gauge buckets:[10.09090909090909 2487.409090909091]}} {type="counter"} 2490 # End of range does not coincide with sample. Zero point of count is reached within the range. -# The 2nd bucket again has an exaggerated increase, but it is less obvious because of the -# right-side extrapolation. eval instant at 54m30s increase(metric[90m]) - {type="histogram"} {{count:2512.9166666666665 sum:50.76599326599326 counter_reset_hint:gauge buckets:[10.153198653198652 2502.7634680134674]}} + {type="histogram"} {{count:2520.8333333333335 sum:50.92592592592593 counter_reset_hint:gauge buckets:[10.185185185185187 2510.6481481481483]}} {type="counter"} 2512.9166666666665 # End of range coincides with sample. Zero point of count is reached outside of (i.e. before) the range. @@ -1510,16 +1408,3 @@ eval instant at 55m increase(metric[55m15s]) eval instant at 54m30s increase(metric[54m45s]) {type="histogram"} {{count:2509.375 sum:50.69444444444444 counter_reset_hint:gauge buckets:[10.13888888888889 2499.236111111111]}} {type="counter"} 2509.375 - -# Try the same, but now extract just the histogram count via `histogram_count`. 
-eval instant at 55m histogram_count(increase(metric[90m])) - {type="histogram"} 2490 - -eval instant at 54m30s histogram_count(increase(metric[90m])) - {type="histogram"} 2512.9166666666665 - -eval instant at 55m histogram_count(increase(metric[55m15s])) - {type="histogram"} 2486.25 - -eval instant at 54m30s histogram_count(increase(metric[54m45s])) - {type="histogram"} 2509.375 diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/operators.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/operators.test index 0e779f192cb..667989ca77d 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/operators.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/operators.test @@ -289,32 +289,24 @@ eval instant at 50m http_requests_total{job="api-server", instance="0", group="p {job="api-server", instance="0", group="production"} 1 # The histogram is ignored here so the result doesn't change but it has an info annotation now. -eval instant at 5m {job="app-server"} == 80 - expect info +eval_info instant at 5m {job="app-server"} == 80 http_requests_total{group="canary", instance="1", job="app-server"} 80 -eval instant at 5m http_requests_histogram != 80 - expect info +eval_info instant at 5m http_requests_histogram != 80 -eval instant at 5m http_requests_histogram > 80 - expect info +eval_info instant at 5m http_requests_histogram > 80 -eval instant at 5m http_requests_histogram < 80 - expect info +eval_info instant at 5m http_requests_histogram < 80 -eval instant at 5m http_requests_histogram >= 80 - expect info +eval_info instant at 5m http_requests_histogram >= 80 -eval instant at 5m http_requests_histogram <= 80 - expect info +eval_info instant at 5m http_requests_histogram <= 80 # Should produce valid results in case of (in)equality between two histograms. 
eval instant at 5m http_requests_histogram == http_requests_histogram - expect no_info http_requests_histogram{job="app-server", instance="1", group="production"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}} eval instant at 5m http_requests_histogram != http_requests_histogram - expect no_info # group_left/group_right. @@ -478,8 +470,7 @@ load 5m testmetric1{src="a",dst="b"} 0 testmetric2{src="a",dst="b"} 1 -eval instant at 0m -{__name__=~'testmetric1|testmetric2'} - expect fail +eval_fail instant at 0m -{__name__=~'testmetric1|testmetric2'} clear @@ -529,386 +520,290 @@ load 6m right_floats_for_histograms 0 -1 2 3 4 eval range from 0 to 60m step 6m left_floats == right_floats - expect no_info left_floats _ _ _ _ 3 _ _ _ _ Inf -Inf eval range from 0 to 60m step 6m left_floats == bool right_floats - expect no_info {} 0 _ _ _ 1 _ 0 0 0 1 1 eval range from 0 to 60m step 6m left_floats == does_not_match - expect no_info # No results. eval range from 0 to 24m step 6m left_histograms == right_histograms - expect no_info left_histograms {{schema:3 sum:4 count:4 buckets:[1 2 1]}} _ _ _ _ eval range from 0 to 24m step 6m left_histograms == bool right_histograms - expect no_info {} 1 0 _ _ _ -eval range from 0 to 24m step 6m left_histograms == right_floats_for_histograms - expect info +eval_info range from 0 to 24m step 6m left_histograms == right_floats_for_histograms # No results. -eval range from 0 to 24m step 6m left_histograms == bool right_floats_for_histograms - expect info +eval_info range from 0 to 24m step 6m left_histograms == bool right_floats_for_histograms # No results. 
eval range from 0 to 60m step 6m left_floats != right_floats - expect no_info left_floats 1 _ _ _ _ _ 4 5 NaN _ _ eval range from 0 to 60m step 6m left_floats != bool right_floats - expect no_info {} 1 _ _ _ 0 _ 1 1 1 0 0 eval range from 0 to 24m step 6m left_histograms != right_histograms - expect no_info left_histograms _ {{schema:3 sum:4.5 count:5 buckets:[1 3 1]}} _ _ _ eval range from 0 to 24m step 6m left_histograms != bool right_histograms - expect no_info {} 0 1 _ _ _ -eval range from 0 to 24m step 6m left_histograms != right_floats_for_histograms - expect info +eval_info range from 0 to 24m step 6m left_histograms != right_floats_for_histograms # No results. -eval range from 0 to 24m step 6m left_histograms != bool right_floats_for_histograms - expect info +eval_info range from 0 to 24m step 6m left_histograms != bool right_floats_for_histograms # No results. eval range from 0 to 60m step 6m left_floats > right_floats - expect no_info left_floats _ _ _ _ _ _ 4 _ _ _ _ eval range from 0 to 60m step 6m left_floats > bool right_floats - expect no_info {} 0 _ _ _ 0 _ 1 0 0 0 0 -eval range from 0 to 24m step 6m left_histograms > right_histograms - expect info +eval_info range from 0 to 24m step 6m left_histograms > right_histograms # No results. -eval range from 0 to 24m step 6m left_histograms > bool right_histograms - expect info +eval_info range from 0 to 24m step 6m left_histograms > bool right_histograms # No results. -eval range from 0 to 24m step 6m left_histograms > right_floats_for_histograms - expect info +eval_info range from 0 to 24m step 6m left_histograms > right_floats_for_histograms # No results. -eval range from 0 to 24m step 6m left_histograms > bool right_floats_for_histograms - expect info +eval_info range from 0 to 24m step 6m left_histograms > bool right_floats_for_histograms # No results. 
eval range from 0 to 60m step 6m left_floats >= right_floats - expect no_info left_floats _ _ _ _ 3 _ 4 _ _ Inf -Inf eval range from 0 to 60m step 6m left_floats >= bool right_floats - expect no_info {} 0 _ _ _ 1 _ 1 0 0 1 1 -eval range from 0 to 24m step 6m left_histograms >= right_histograms - expect info +eval_info range from 0 to 24m step 6m left_histograms >= right_histograms # No results. -eval range from 0 to 24m step 6m left_histograms >= bool right_histograms - expect info +eval_info range from 0 to 24m step 6m left_histograms >= bool right_histograms # No results. -eval range from 0 to 24m step 6m left_histograms >= right_floats_for_histograms - expect info +eval_info range from 0 to 24m step 6m left_histograms >= right_floats_for_histograms # No results. -eval range from 0 to 24m step 6m left_histograms >= bool right_floats_for_histograms - expect info +eval_info range from 0 to 24m step 6m left_histograms >= bool right_floats_for_histograms # No results. eval range from 0 to 60m step 6m left_floats < right_floats - expect no_info left_floats 1 _ _ _ _ _ _ 5 _ _ _ eval range from 0 to 60m step 6m left_floats < bool right_floats - expect no_info {} 1 _ _ _ 0 _ 0 1 0 0 0 -eval range from 0 to 24m step 6m left_histograms < right_histograms - expect info +eval_info range from 0 to 24m step 6m left_histograms < right_histograms # No results. -eval range from 0 to 24m step 6m left_histograms < bool right_histograms - expect info +eval_info range from 0 to 24m step 6m left_histograms < bool right_histograms # No results. -eval range from 0 to 24m step 6m left_histograms < right_floats_for_histograms - expect info +eval_info range from 0 to 24m step 6m left_histograms < right_floats_for_histograms # No results. -eval range from 0 to 24m step 6m left_histograms < bool right_floats_for_histograms - expect info +eval_info range from 0 to 24m step 6m left_histograms < bool right_floats_for_histograms # No results. 
eval range from 0 to 60m step 6m left_floats <= right_floats - expect no_info left_floats 1 _ _ _ 3 _ _ 5 _ Inf -Inf eval range from 0 to 60m step 6m left_floats <= bool right_floats - expect no_info {} 1 _ _ _ 1 _ 0 1 0 1 1 -eval range from 0 to 24m step 6m left_histograms <= right_histograms - expect info +eval_info range from 0 to 24m step 6m left_histograms <= right_histograms # No results. -eval range from 0 to 24m step 6m left_histograms <= bool right_histograms - expect info +eval_info range from 0 to 24m step 6m left_histograms <= bool right_histograms # No results. -eval range from 0 to 24m step 6m left_histograms <= right_floats_for_histograms - expect info +eval_info range from 0 to 24m step 6m left_histograms <= right_floats_for_histograms # No results. -eval range from 0 to 24m step 6m left_histograms <= bool right_floats_for_histograms - expect info +eval_info range from 0 to 24m step 6m left_histograms <= bool right_floats_for_histograms # No results. # Vector / scalar combinations with scalar on right side eval range from 0 to 60m step 6m left_floats == 3 - expect no_info left_floats _ _ _ _ 3 _ _ _ _ _ _ eval range from 0 to 60m step 6m left_floats != 3 - expect no_info left_floats 1 2 _ _ _ _ 4 5 NaN Inf -Inf eval range from 0 to 60m step 6m left_floats > 3 - expect no_info left_floats _ _ _ _ _ _ 4 5 _ Inf _ eval range from 0 to 60m step 6m left_floats >= 3 - expect no_info left_floats _ _ _ _ 3 _ 4 5 _ Inf _ eval range from 0 to 60m step 6m left_floats < 3 - expect no_info left_floats 1 2 _ _ _ _ _ _ _ _ -Inf eval range from 0 to 60m step 6m left_floats <= 3 - expect no_info left_floats 1 2 _ _ 3 _ _ _ _ _ -Inf eval range from 0 to 60m step 6m left_floats == bool 3 - expect no_info {} 0 0 _ _ 1 _ 0 0 0 0 0 eval range from 0 to 60m step 6m left_floats == Inf - expect no_info left_floats _ _ _ _ _ _ _ _ _ Inf _ eval range from 0 to 60m step 6m left_floats == bool Inf - expect no_info {} 0 0 _ _ 0 _ 0 0 0 1 0 eval range from 0 to 60m step 6m 
left_floats == NaN - expect no_info # No results. eval range from 0 to 60m step 6m left_floats == bool NaN - expect no_info {} 0 0 _ _ 0 _ 0 0 0 0 0 -eval range from 0 to 24m step 6m left_histograms == 3 - expect info +eval_info range from 0 to 24m step 6m left_histograms == 3 # No results. -eval range from 0 to 24m step 6m left_histograms == 0 - expect info +eval_info range from 0 to 24m step 6m left_histograms == 0 # No results. -eval range from 0 to 24m step 6m left_histograms != 3 - expect info +eval_info range from 0 to 24m step 6m left_histograms != 3 # No results. -eval range from 0 to 24m step 6m left_histograms != 0 - expect info +eval_info range from 0 to 24m step 6m left_histograms != 0 # No results. -eval range from 0 to 24m step 6m left_histograms > 3 - expect info +eval_info range from 0 to 24m step 6m left_histograms > 3 # No results. -eval range from 0 to 24m step 6m left_histograms > 0 - expect info +eval_info range from 0 to 24m step 6m left_histograms > 0 # No results. -eval range from 0 to 24m step 6m left_histograms >= 3 - expect info +eval_info range from 0 to 24m step 6m left_histograms >= 3 # No results. -eval range from 0 to 24m step 6m left_histograms >= 0 - expect info +eval_info range from 0 to 24m step 6m left_histograms >= 0 # No results. -eval range from 0 to 24m step 6m left_histograms < 3 - expect info +eval_info range from 0 to 24m step 6m left_histograms < 3 # No results. -eval range from 0 to 24m step 6m left_histograms < 0 - expect info +eval_info range from 0 to 24m step 6m left_histograms < 0 # No results. -eval range from 0 to 24m step 6m left_histograms <= 3 - expect info +eval_info range from 0 to 24m step 6m left_histograms <= 3 # No results. -eval range from 0 to 24m step 6m left_histograms <= 0 - expect info +eval_info range from 0 to 24m step 6m left_histograms <= 0 # No results. 
-eval range from 0 to 24m step 6m left_histograms == bool 3 - expect info +eval_info range from 0 to 24m step 6m left_histograms == bool 3 # No results. -eval range from 0 to 24m step 6m left_histograms == bool 0 - expect info +eval_info range from 0 to 24m step 6m left_histograms == bool 0 # No results. -eval range from 0 to 24m step 6m left_histograms != bool 3 - expect info +eval_info range from 0 to 24m step 6m left_histograms != bool 3 # No results. -eval range from 0 to 24m step 6m left_histograms != bool 0 - expect info +eval_info range from 0 to 24m step 6m left_histograms != bool 0 # No results. -eval range from 0 to 24m step 6m left_histograms > bool 3 - expect info +eval_info range from 0 to 24m step 6m left_histograms > bool 3 # No results. -eval range from 0 to 24m step 6m left_histograms > bool 0 - expect info +eval_info range from 0 to 24m step 6m left_histograms > bool 0 # No results. -eval range from 0 to 24m step 6m left_histograms >= bool 3 - expect info +eval_info range from 0 to 24m step 6m left_histograms >= bool 3 # No results. -eval range from 0 to 24m step 6m left_histograms >= bool 0 - expect info +eval_info range from 0 to 24m step 6m left_histograms >= bool 0 # No results. -eval range from 0 to 24m step 6m left_histograms < bool 3 - expect info +eval_info range from 0 to 24m step 6m left_histograms < bool 3 # No results. -eval range from 0 to 24m step 6m left_histograms < bool 0 - expect info +eval_info range from 0 to 24m step 6m left_histograms < bool 0 # No results. -eval range from 0 to 24m step 6m left_histograms <= bool 3 - expect info +eval_info range from 0 to 24m step 6m left_histograms <= bool 3 # No results. -eval range from 0 to 24m step 6m left_histograms <= bool 0 - expect info +eval_info range from 0 to 24m step 6m left_histograms <= bool 0 # No results. 
# Vector / scalar combinations with scalar on left side eval range from 0 to 60m step 6m 3 == left_floats - expect no_info left_floats _ _ _ _ 3 _ _ _ _ _ _ eval range from 0 to 60m step 6m 3 != left_floats - expect no_info left_floats 1 2 _ _ _ _ 4 5 NaN Inf -Inf eval range from 0 to 60m step 6m 3 < left_floats - expect no_info left_floats _ _ _ _ _ _ 4 5 _ Inf _ eval range from 0 to 60m step 6m 3 <= left_floats - expect no_info left_floats _ _ _ _ 3 _ 4 5 _ Inf _ eval range from 0 to 60m step 6m 3 > left_floats - expect no_info left_floats 1 2 _ _ _ _ _ _ _ _ -Inf eval range from 0 to 60m step 6m 3 >= left_floats - expect no_info left_floats 1 2 _ _ 3 _ _ _ _ _ -Inf eval range from 0 to 60m step 6m 3 == bool left_floats - expect no_info {} 0 0 _ _ 1 _ 0 0 0 0 0 eval range from 0 to 60m step 6m Inf == left_floats - expect no_info left_floats _ _ _ _ _ _ _ _ _ Inf _ eval range from 0 to 60m step 6m Inf == bool left_floats - expect no_info {} 0 0 _ _ 0 _ 0 0 0 1 0 eval range from 0 to 60m step 6m NaN == left_floats - expect no_info - expect no_warn # No results. eval range from 0 to 60m step 6m NaN == bool left_floats - expect no_info {} 0 0 _ _ 0 _ 0 0 0 0 0 -eval range from 0 to 24m step 6m 3 == left_histograms - expect info +eval_info range from 0 to 24m step 6m 3 == left_histograms # No results. -eval range from 0 to 24m step 6m 0 == left_histograms - expect info +eval_info range from 0 to 24m step 6m 0 == left_histograms # No results. -eval range from 0 to 24m step 6m 3 != left_histograms - expect info +eval_info range from 0 to 24m step 6m 3 != left_histograms # No results. -eval range from 0 to 24m step 6m 0 != left_histograms - expect info +eval_info range from 0 to 24m step 6m 0 != left_histograms # No results. -eval range from 0 to 24m step 6m 3 < left_histograms - expect info +eval_info range from 0 to 24m step 6m 3 < left_histograms # No results. 
-eval range from 0 to 24m step 6m 0 < left_histograms - expect info +eval_info range from 0 to 24m step 6m 0 < left_histograms # No results. -eval range from 0 to 24m step 6m 3 < left_histograms - expect info +eval_info range from 0 to 24m step 6m 3 < left_histograms # No results. -eval range from 0 to 24m step 6m 0 < left_histograms - expect info +eval_info range from 0 to 24m step 6m 0 < left_histograms # No results. -eval range from 0 to 24m step 6m 3 > left_histograms - expect info +eval_info range from 0 to 24m step 6m 3 > left_histograms # No results. -eval range from 0 to 24m step 6m 0 > left_histograms - expect info +eval_info range from 0 to 24m step 6m 0 > left_histograms # No results. -eval range from 0 to 24m step 6m 3 >= left_histograms - expect info +eval_info range from 0 to 24m step 6m 3 >= left_histograms # No results. -eval range from 0 to 24m step 6m 0 >= left_histograms - expect info +eval_info range from 0 to 24m step 6m 0 >= left_histograms # No results. clear diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/subquery.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/subquery.test index f803dba349d..8c7c178b852 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/subquery.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/subquery.test @@ -156,6 +156,4 @@ load 5m foo 3+0x10 eval instant at 12m min_over_time((topk(1, foo))[1m:5m]) - expect no_info - expect no_warn #empty diff --git a/vendor/github.com/prometheus/prometheus/promql/value.go b/vendor/github.com/prometheus/prometheus/promql/value.go index 2e387117e51..dc59b9e9cc8 100644 --- a/vendor/github.com/prometheus/prometheus/promql/value.go +++ b/vendor/github.com/prometheus/prometheus/promql/value.go @@ -475,12 +475,8 @@ func (ssi *storageSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *his panic(errors.New("storageSeriesIterator: AtHistogram not supported")) } -func (ssi 
*storageSeriesIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { - if fh == nil { - return ssi.currT, ssi.currH.Copy() - } - ssi.currH.CopyTo(fh) - return ssi.currT, fh +func (ssi *storageSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { + return ssi.currT, ssi.currH } func (ssi *storageSeriesIterator) AtT() int64 { diff --git a/vendor/github.com/prometheus/prometheus/scrape/manager.go b/vendor/github.com/prometheus/prometheus/scrape/manager.go index c2da4558588..7ffce2f38d7 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/manager.go +++ b/vendor/github.com/prometheus/prometheus/scrape/manager.go @@ -127,10 +127,7 @@ func (m *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) error { go m.reloader() for { select { - case ts, ok := <-tsets: - if !ok { - break - } + case ts := <-tsets: m.updateTsets(ts) select { diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index b4f34a6f5b3..d92b7fc16b5 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -23,7 +23,6 @@ import ( "log/slog" "math" "net/http" - "net/http/httptrace" "reflect" "slices" "strconv" @@ -37,10 +36,6 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" - "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -149,13 +144,15 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed logger = promslog.NewNopLogger() } - client, err := newScrapeClient(cfg.HTTPClientConfig, cfg.JobName, 
options.HTTPClientOptions...) + client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, options.HTTPClientOptions...) if err != nil { - return nil, err + return nil, fmt.Errorf("error creating HTTP client: %w", err) } - var escapingScheme model.EscapingScheme - escapingScheme, err = config.ToEscapingScheme(cfg.MetricNameEscapingScheme, cfg.MetricNameValidationScheme) + if cfg.MetricNameValidationScheme == model.UnsetValidation { + return nil, errors.New("cfg.MetricNameValidationScheme must be set in scrape configuration") + } + escapingScheme, err := config.ToEscapingScheme(cfg.MetricNameEscapingScheme, cfg.MetricNameValidationScheme) if err != nil { return nil, fmt.Errorf("invalid metric name escaping scheme, %w", err) } @@ -316,16 +313,19 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { sp.metrics.targetScrapePoolReloads.Inc() start := time.Now() - client, err := newScrapeClient(cfg.HTTPClientConfig, cfg.JobName, sp.httpOpts...) + client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, sp.httpOpts...) 
if err != nil { sp.metrics.targetScrapePoolReloadsFailed.Inc() - return err + return fmt.Errorf("error creating HTTP client: %w", err) } reuseCache := reusableCache(sp.config, cfg) sp.config = cfg oldClient := sp.client sp.client = client + if cfg.MetricNameValidationScheme == model.UnsetValidation { + return errors.New("cfg.MetricNameValidationScheme must be set in scrape configuration") + } sp.validationScheme = cfg.MetricNameValidationScheme var escapingScheme model.EscapingScheme escapingScheme, err = model.ToEscapingScheme(cfg.MetricNameEscapingScheme) @@ -829,8 +829,6 @@ func (s *targetScraper) scrape(ctx context.Context) (*http.Response, error) { s.req = req } - ctx, span := otel.Tracer("").Start(ctx, "Scrape", trace.WithSpanKind(trace.SpanKindClient)) - defer span.End() return s.client.Do(s.req.WithContext(ctx)) } @@ -2275,16 +2273,3 @@ func pickSchema(bucketFactor float64) int32 { return int32(floor) } } - -func newScrapeClient(cfg config_util.HTTPClientConfig, name string, optFuncs ...config_util.HTTPClientOption) (*http.Client, error) { - client, err := config_util.NewClientFromConfig(cfg, name, optFuncs...) 
- if err != nil { - return nil, fmt.Errorf("error creating HTTP client: %w", err) - } - client.Transport = otelhttp.NewTransport( - client.Transport, - otelhttp.WithClientTrace(func(ctx context.Context) *httptrace.ClientTrace { - return otelhttptrace.NewClientTrace(ctx, otelhttptrace.WithoutSubSpans()) - })) - return client, nil -} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go index 8e43b62aae2..b763b3e2b4b 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -133,7 +133,6 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope s // Include name, version and schema URL. scopeLabelCount = scope.attributes.Len() + 3 } - // Calculate the maximum possible number of labels we could return so we can preallocate l. maxLabelCount := attributes.Len() + len(settings.ExternalLabels) + len(promotedAttrs) + scopeLabelCount + len(extras)/2 @@ -176,15 +175,15 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope s } } if promoteScope { + l["otel_scope_name"] = scope.name + l["otel_scope_version"] = scope.version + l["otel_scope_schema_url"] = scope.schemaURL scope.attributes.Range(func(k string, v pcommon.Value) bool { - name := labelNamer.Build("otel_scope_" + k) + name := "otel_scope_" + k + name = labelNamer.Build(name) l[name] = v.AsString() return true }) - // Scope Name, Version and Schema URL are added after attributes to ensure they are not overwritten by attributes. - l["otel_scope_name"] = scope.name - l["otel_scope_version"] = scope.version - l["otel_scope_schema_url"] = scope.schemaURL } // Map service.name + service.namespace to job. 
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index 9955fd5fc66..91563bf2c29 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -53,10 +53,10 @@ type Settings struct { KeepIdentifyingResourceAttributes bool ConvertHistogramsToNHCB bool AllowDeltaTemporality bool - // LookbackDelta is the PromQL engine lookback delta. - LookbackDelta time.Duration // PromoteScopeMetadata controls whether to promote OTel scope metadata to metric labels. PromoteScopeMetadata bool + // LookbackDelta is the PromQL engine lookback delta. + LookbackDelta time.Duration // Mimir specifics. EnableCreatedTimestampZeroIngestion bool diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go index cacad16a3c8..ef180ae4a2e 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go @@ -600,16 +600,15 @@ func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) er otlpCfg := rw.config().OTLPConfig converter := otlptranslator.NewPrometheusConverter() - annots, err := converter.FromMetrics(ctx, md, otlptranslator.Settings{ - AddMetricSuffixes: otlpCfg.TranslationStrategy.ShouldAddSuffixes(), - AllowUTF8: !otlpCfg.TranslationStrategy.ShouldEscape(), + AddMetricSuffixes: otlpCfg.TranslationStrategy != config.NoTranslation, + AllowUTF8: otlpCfg.TranslationStrategy != config.UnderscoreEscapingWithSuffixes, PromoteResourceAttributes: otlptranslator.NewPromoteResourceAttributes(otlpCfg), 
KeepIdentifyingResourceAttributes: otlpCfg.KeepIdentifyingResourceAttributes, ConvertHistogramsToNHCB: otlpCfg.ConvertHistogramsToNHCB, AllowDeltaTemporality: rw.allowDeltaTemporality, - LookbackDelta: rw.lookbackDelta, PromoteScopeMetadata: otlpCfg.PromoteScopeMetadata, + LookbackDelta: rw.lookbackDelta, // Mimir specifics. EnableCreatedTimestampZeroIngestion: rw.enableCTZeroIngestion, diff --git a/vendor/github.com/prometheus/prometheus/template/template.go b/vendor/github.com/prometheus/prometheus/template/template.go index 87ca32b3469..75a9f33bd2e 100644 --- a/vendor/github.com/prometheus/prometheus/template/template.go +++ b/vendor/github.com/prometheus/prometheus/template/template.go @@ -263,17 +263,6 @@ func NewTemplateExpander( return floatToTime(v) }, - "toDuration": func(i interface{}) (*time.Duration, error) { - v, err := common_templates.ConvertToFloat(i) - if err != nil { - return nil, err - } - d := time.Duration(v * float64(time.Second)) - return &d, nil - }, - "now": func() float64 { - return float64(timestamp) / 1000.0 - }, "pathPrefix": func() string { return externalURL.Path }, @@ -281,7 +270,7 @@ func NewTemplateExpander( return externalURL.String() }, "parseDuration": func(d string) (float64, error) { - v, err := model.ParseDurationAllowNegative(d) + v, err := model.ParseDuration(d) if err != nil { return 0, err } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go index 7f3b2a5968f..e5ad4028bbb 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go @@ -343,7 +343,7 @@ func (a *FloatHistogramAppender) appendable(h *histogram.FloatHistogram) ( // original deltas to a new set of deltas to match a new span layout that adds // buckets, we simply need to generate a list of inserts. 
// -// Note: Within expandFloatSpansAndBuckets we don't have to worry about the changes to the +// Note: Within expandSpansForward we don't have to worry about the changes to the // spans themselves, thanks to the iterators we get to work with the more useful // bucket indices (which of course directly correspond to the buckets we have to // adjust). @@ -378,48 +378,6 @@ func expandFloatSpansAndBuckets(a, b []histogram.Span, aBuckets []xorValue, bBuc bCount = bBuckets[bCountIdx] } - // addInsert updates the current Insert with a new insert at the given - // bucket index (otherIdx). - addInsert := func(inserts []Insert, insert *Insert, otherIdx int) []Insert { - if insert.num == 0 { - // First insert. - insert.bucketIdx = otherIdx - } else if insert.bucketIdx+insert.num != otherIdx { - // Insert is not continuous from previous insert. - inserts = append(inserts, *insert) - insert.num = 0 - insert.bucketIdx = otherIdx - } - insert.num++ - return inserts - } - - advanceA := func() { - if aInter.num > 0 { - aInserts = append(aInserts, aInter) - aInter.num = 0 - } - aIdx, aOK = ai.Next() - aInter.pos++ - aCountIdx++ - if aOK { - aCount = aBuckets[aCountIdx].value - } - } - - advanceB := func() { - if bInter.num > 0 { - bInserts = append(bInserts, bInter) - bInter.num = 0 - } - bIdx, bOK = bi.Next() - bInter.pos++ - bCountIdx++ - if bOK { - bCount = bBuckets[bCountIdx] - } - } - loop: for { switch { @@ -431,37 +389,105 @@ loop: return nil, nil, false } - advanceA() - advanceB() + // Finish WIP insert for a and reset. + if aInter.num > 0 { + aInserts = append(aInserts, aInter) + aInter.num = 0 + } + + // Finish WIP insert for b and reset. + if bInter.num > 0 { + bInserts = append(bInserts, bInter) + bInter.num = 0 + } + + aIdx, aOK = ai.Next() + bIdx, bOK = bi.Next() + aInter.pos++ // Advance potential insert position. + aCountIdx++ // Advance absolute bucket count index for a. 
+ if aOK { + aCount = aBuckets[aCountIdx].value + } + bInter.pos++ // Advance potential insert position. + bCountIdx++ // Advance absolute bucket count index for b. + if bOK { + bCount = bBuckets[bCountIdx] + } continue case aIdx < bIdx: // b misses a bucket index that is in a. // This is ok if the count in a is 0, in which case we make a note to // fill in the bucket in b and advance a. if aCount == 0 { - bInserts = addInsert(bInserts, &bInter, aIdx) - advanceA() + bInter.num++ // Mark that we need to insert a bucket in b. + bInter.bucketIdx = aIdx + // Advance a + if aInter.num > 0 { + aInserts = append(aInserts, aInter) + aInter.num = 0 + } + aIdx, aOK = ai.Next() + aInter.pos++ + aCountIdx++ + if aOK { + aCount = aBuckets[aCountIdx].value + } continue } // Otherwise we are missing a bucket that was in use in a, which is a reset. return nil, nil, false case aIdx > bIdx: // a misses a value that is in b. Forward b and recompare. - aInserts = addInsert(aInserts, &aInter, bIdx) - advanceB() + aInter.num++ + bInter.bucketIdx = bIdx + // Advance b + if bInter.num > 0 { + bInserts = append(bInserts, bInter) + bInter.num = 0 + } + bIdx, bOK = bi.Next() + bInter.pos++ + bCountIdx++ + if bOK { + bCount = bBuckets[bCountIdx] + } } case aOK && !bOK: // b misses a value that is in a. // This is ok if the count in a is 0, in which case we make a note to // fill in the bucket in b and advance a. if aCount == 0 { - bInserts = addInsert(bInserts, &bInter, aIdx) - advanceA() + bInter.num++ + bInter.bucketIdx = aIdx + // Advance a + if aInter.num > 0 { + aInserts = append(aInserts, aInter) + aInter.num = 0 + } + aIdx, aOK = ai.Next() + aInter.pos++ // Advance potential insert position. + // Update absolute bucket counts for a. + aCountIdx++ + if aOK { + aCount = aBuckets[aCountIdx].value + } continue } // Otherwise we are missing a bucket that was in use in a, which is a reset. return nil, nil, false case !aOK && bOK: // a misses a value that is in b. Forward b and recompare. 
- aInserts = addInsert(aInserts, &aInter, bIdx) - advanceB() + aInter.num++ + bInter.bucketIdx = bIdx + // Advance b + if bInter.num > 0 { + bInserts = append(bInserts, bInter) + bInter.num = 0 + } + bIdx, bOK = bi.Next() + bInter.pos++ // Advance potential insert position. + // Update absolute bucket counts for b. + bCountIdx++ + if bOK { + bCount = bBuckets[bCountIdx] + } default: // Both iterators ran out. We're done. if aInter.num > 0 { aInserts = append(aInserts, aInter) @@ -756,7 +782,7 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend // The histogram needs to be expanded to have the extra empty buckets // of the chunk. if len(pForwardInserts) == 0 && len(nForwardInserts) == 0 { - // No new buckets from the histogram, so the spans of the appender can accommodate the new buckets. + // No new chunks from the histogram, so the spans of the appender can accommodate the new buckets. // However we need to make a copy in case the input is sharing spans from an iterator. h.PositiveSpans = make([]histogram.Span, len(a.pSpans)) copy(h.PositiveSpans, a.pSpans) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go index 4ba0c467d82..0f54eb69288 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go @@ -374,7 +374,7 @@ func (a *HistogramAppender) appendable(h *histogram.Histogram) ( // original deltas to a new set of deltas to match a new span layout that adds // buckets, we simply need to generate a list of inserts. 
// -// Note: Within expandIntSpansAndBuckets we don't have to worry about the changes to the +// Note: Within expandSpansForward we don't have to worry about the changes to the // spans themselves, thanks to the iterators we get to work with the more useful // bucket indices (which of course directly correspond to the buckets we have to // adjust). @@ -409,48 +409,6 @@ func expandIntSpansAndBuckets(a, b []histogram.Span, aBuckets, bBuckets []int64) bCount = bBuckets[bCountIdx] } - // addInsert updates the current Insert with a new insert at the given - // bucket index (otherIdx). - addInsert := func(inserts []Insert, insert *Insert, otherIdx int) []Insert { - if insert.num == 0 { - // First insert. - insert.bucketIdx = otherIdx - } else if insert.bucketIdx+insert.num != otherIdx { - // Insert is not continuous from previous insert. - inserts = append(inserts, *insert) - insert.num = 0 - insert.bucketIdx = otherIdx - } - insert.num++ - return inserts - } - - advanceA := func() { - if aInter.num > 0 { - aInserts = append(aInserts, aInter) - aInter.num = 0 - } - aIdx, aOK = ai.Next() - aInter.pos++ - aCountIdx++ - if aOK { - aCount += aBuckets[aCountIdx] - } - } - - advanceB := func() { - if bInter.num > 0 { - bInserts = append(bInserts, bInter) - bInter.num = 0 - } - bIdx, bOK = bi.Next() - bInter.pos++ - bCountIdx++ - if bOK { - bCount += bBuckets[bCountIdx] - } - } - loop: for { switch { @@ -462,37 +420,105 @@ loop: return nil, nil, false } - advanceA() - advanceB() + // Finish WIP insert for a and reset. + if aInter.num > 0 { + aInserts = append(aInserts, aInter) + aInter.num = 0 + } + + // Finish WIP insert for b and reset. + if bInter.num > 0 { + bInserts = append(bInserts, bInter) + bInter.num = 0 + } + + aIdx, aOK = ai.Next() + bIdx, bOK = bi.Next() + aInter.pos++ // Advance potential insert position. + aCountIdx++ // Advance absolute bucket count index for a. + if aOK { + aCount += aBuckets[aCountIdx] + } + bInter.pos++ // Advance potential insert position. 
+ bCountIdx++ // Advance absolute bucket count index for b. + if bOK { + bCount += bBuckets[bCountIdx] + } continue case aIdx < bIdx: // b misses a bucket index that is in a. // This is ok if the count in a is 0, in which case we make a note to // fill in the bucket in b and advance a. if aCount == 0 { - bInserts = addInsert(bInserts, &bInter, aIdx) - advanceA() + bInter.num++ // Mark that we need to insert a bucket in b. + bInter.bucketIdx = aIdx + // Advance a + if aInter.num > 0 { + aInserts = append(aInserts, aInter) + aInter.num = 0 + } + aIdx, aOK = ai.Next() + aInter.pos++ + aCountIdx++ + if aOK { + aCount += aBuckets[aCountIdx] + } continue } // Otherwise we are missing a bucket that was in use in a, which is a reset. return nil, nil, false case aIdx > bIdx: // a misses a value that is in b. Forward b and recompare. - aInserts = addInsert(aInserts, &aInter, bIdx) - advanceB() + aInter.num++ + aInter.bucketIdx = bIdx + // Advance b + if bInter.num > 0 { + bInserts = append(bInserts, bInter) + bInter.num = 0 + } + bIdx, bOK = bi.Next() + bInter.pos++ + bCountIdx++ + if bOK { + bCount += bBuckets[bCountIdx] + } } case aOK && !bOK: // b misses a value that is in a. // This is ok if the count in a is 0, in which case we make a note to // fill in the bucket in b and advance a. if aCount == 0 { - bInserts = addInsert(bInserts, &bInter, aIdx) - advanceA() + bInter.num++ + bInter.bucketIdx = aIdx + // Advance a + if aInter.num > 0 { + aInserts = append(aInserts, aInter) + aInter.num = 0 + } + aIdx, aOK = ai.Next() + aInter.pos++ // Advance potential insert position. + // Update absolute bucket counts for a. + aCountIdx++ + if aOK { + aCount += aBuckets[aCountIdx] + } continue } // Otherwise we are missing a bucket that was in use in a, which is a reset. return nil, nil, false case !aOK && bOK: // a misses a value that is in b. Forward b and recompare. 
- aInserts = addInsert(aInserts, &aInter, bIdx) - advanceB() + aInter.num++ + aInter.bucketIdx = bIdx + // Advance b + if bInter.num > 0 { + bInserts = append(bInserts, bInter) + bInter.num = 0 + } + bIdx, bOK = bi.Next() + bInter.pos++ // Advance potential insert position. + // Update absolute bucket counts for b. + bCountIdx++ + if bOK { + bCount += bBuckets[bCountIdx] + } default: // Both iterators ran out. We're done. if aInter.num > 0 { aInserts = append(aInserts, aInter) @@ -797,7 +823,7 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h // The histogram needs to be expanded to have the extra empty buckets // of the chunk. if len(pForwardInserts) == 0 && len(nForwardInserts) == 0 { - // No new buckets from the histogram, so the spans of the appender can accommodate the new buckets. + // No new chunks from the histogram, so the spans of the appender can accommodate the new buckets. // However we need to make a copy in case the input is sharing spans from an iterator. h.PositiveSpans = make([]histogram.Span, len(a.pSpans)) copy(h.PositiveSpans, a.pSpans) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go index 5ee783fd683..7bb31acf00c 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go @@ -284,12 +284,101 @@ type Insert struct { bucketIdx int } -// expandSpansBothWays is similar to expandFloatSpansAndBuckets and -// expandIntSpansAndBuckets, but now b may also cover an entirely different set -// of buckets and counter resets are ignored. The function returns the “forward” -// inserts to expand 'a' to also cover all the buckets exclusively covered by -// 'b', and it returns the “backward” inserts to expand 'b' to also cover all -// the buckets exclusively covered by 'a'. 
+// Deprecated: expandSpansForward, use expandIntSpansAndBuckets or +// expandFloatSpansAndBuckets instead. +// expandSpansForward is left here for reference. +// expandSpansForward returns the inserts to expand the bucket spans 'a' so that +// they match the spans in 'b'. 'b' must cover the same or more buckets than +// 'a', otherwise the function will return false. +// +// Example: +// +// Let's say the old buckets look like this: +// +// span syntax: [offset, length] +// spans : [ 0 , 2 ] [2,1] [ 3 , 2 ] [3,1] [1,1] +// bucket idx : [0] [1] 2 3 [4] 5 6 7 [8] [9] 10 11 12 [13] 14 [15] +// raw values 6 3 3 2 4 5 1 +// deltas 6 -3 0 -1 2 1 -4 +// +// But now we introduce a new bucket layout. (Carefully chosen example where we +// have a span appended, one unchanged[*], one prepended, and two merge - in +// that order.) +// +// [*] unchanged in terms of which bucket indices they represent. but to achieve +// that, their offset needs to change if "disrupted" by spans changing ahead of +// them +// +// \/ this one is "unchanged" +// spans : [ 0 , 3 ] [1,1] [ 1 , 4 ] [ 3 , 3 ] +// bucket idx : [0] [1] [2] 3 [4] 5 [6] [7] [8] [9] 10 11 12 [13] [14] [15] +// raw values 6 3 0 3 0 0 2 4 5 0 1 +// deltas 6 -3 -3 3 -3 0 2 2 1 -5 1 +// delta mods: / \ / \ / \ +// +// Note for histograms with delta-encoded buckets: Whenever any new buckets are +// introduced, the subsequent "old" bucket needs to readjust its delta to the +// new base of 0. Thus, for the caller who wants to transform the set of +// original deltas to a new set of deltas to match a new span layout that adds +// buckets, we simply need to generate a list of inserts. +// +// Note: Within expandSpansForward we don't have to worry about the changes to the +// spans themselves, thanks to the iterators we get to work with the more useful +// bucket indices (which of course directly correspond to the buckets we have to +// adjust). 
+func expandSpansForward(a, b []histogram.Span) (forward []Insert, ok bool) { + ai := newBucketIterator(a) + bi := newBucketIterator(b) + + var inserts []Insert + + // When inter.num becomes > 0, this becomes a valid insert that should + // be yielded when we finish a streak of new buckets. + var inter Insert + + av, aOK := ai.Next() + bv, bOK := bi.Next() +loop: + for { + switch { + case aOK && bOK: + switch { + case av == bv: // Both have an identical value. move on! + // Finish WIP insert and reset. + if inter.num > 0 { + inserts = append(inserts, inter) + } + inter.num = 0 + av, aOK = ai.Next() + bv, bOK = bi.Next() + inter.pos++ + case av < bv: // b misses a value that is in a. + return inserts, false + case av > bv: // a misses a value that is in b. Forward b and recompare. + inter.num++ + bv, bOK = bi.Next() + } + case aOK && !bOK: // b misses a value that is in a. + return inserts, false + case !aOK && bOK: // a misses a value that is in b. Forward b and recompare. + inter.num++ + bv, bOK = bi.Next() + default: // Both iterators ran out. We're done. + if inter.num > 0 { + inserts = append(inserts, inter) + } + break loop + } + } + + return inserts, true +} + +// expandSpansBothWays is similar to expandSpansForward, but now b may also +// cover an entirely different set of buckets. The function returns the +// “forward” inserts to expand 'a' to also cover all the buckets exclusively +// covered by 'b', and it returns the “backward” inserts to expand 'b' to also +// cover all the buckets exclusively covered by 'a'. func expandSpansBothWays(a, b []histogram.Span) (forward, backward []Insert, mergedSpans []histogram.Span) { ai := newBucketIterator(a) bi := newBucketIterator(b) @@ -399,24 +488,14 @@ func insert[BV bucketValue](in, out []BV, inserts []Insert, deltas bool) []BV { ii int // The next insert to process. ) for i, d := range in { - if ii >= len(inserts) || i != inserts[ii].pos { - // No inserts at this position, the original delta is still valid. 
- out[oi] = d - oi++ - v += d - continue - } - // Process inserts. - firstInsert := true - for ii < len(inserts) && i == inserts[ii].pos { + if ii < len(inserts) && i == inserts[ii].pos { // We have an insert! // Add insert.num new delta values such that their // bucket values equate 0. When deltas==false, it means // that it is an absolute value. So we set it to 0 // directly. - if deltas && firstInsert { + if deltas { out[oi] = -v - firstInsert = false // No need to go to 0 in further inserts. } else { out[oi] = 0 } @@ -426,30 +505,32 @@ func insert[BV bucketValue](in, out []BV, inserts []Insert, deltas bool) []BV { oi++ } ii++ + + // Now save the value from the input. The delta value we + // should save is the original delta value + the last + // value of the point before the insert (to undo the + // delta that was introduced by the insert). When + // deltas==false, it means that it is an absolute value, + // so we set it directly to the value in the 'in' slice. + if deltas { + out[oi] = d + v + } else { + out[oi] = d + } + oi++ + v = d + v + continue } - // Now save the value from the input. The delta value we - // should save is the original delta value + the last - // value of the point before the insert (to undo the - // delta that was introduced by the insert). When - // deltas==false, it means that it is an absolute value, - // so we set it directly to the value in the 'in' slice. - if deltas { - out[oi] = d + v - } else { - out[oi] = d - } + // If there was no insert, the original delta is still valid. + out[oi] = d oi++ v += d } - // Insert empty buckets at the end. - for ii < len(inserts) { - if inserts[ii].pos < len(in) { - panic("leftover inserts must be after the current buckets") - } - // Add insert.num new delta values such that their - // bucket values equate 0. When deltas==false, it means - // that it is an absolute value. So we set it to 0 - // directly. + switch ii { + case len(inserts): + // All inserts processed. Nothing more to do. 
+ case len(inserts) - 1: + // One more insert to process at the end. if deltas { out[oi] = -v } else { @@ -460,8 +541,8 @@ func insert[BV bucketValue](in, out []BV, inserts []Insert, deltas bool) []BV { out[oi] = 0 oi++ } - ii++ - v = 0 + default: + panic("unprocessed inserts left") } return out } @@ -547,7 +628,7 @@ func adjustForInserts(spans []histogram.Span, inserts []Insert) (mergedSpans []h } } for i < len(inserts) { - addBucket(insertIdx) + addBucket(inserts[i].bucketIdx) consumeInsert() } return diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index 9101a474b75..66738751331 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -191,6 +191,14 @@ func DefaultPostingsDecoderFactory(_ *BlockMeta) index.PostingsDecoder { return index.DecodePostingsRaw } +func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, maxBlockChunkSegmentSize int64, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { + return NewLeveledCompactorWithOptions(ctx, r, l, ranges, pool, LeveledCompactorOptions{ + MaxBlockChunkSegmentSize: maxBlockChunkSegmentSize, + MergeFunc: mergeFunc, + EnableOverlappingCompaction: true, + }) +} + func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { return NewLeveledCompactorWithOptions(ctx, r, l, ranges, pool, LeveledCompactorOptions{ MergeFunc: mergeFunc, diff --git a/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go index f8070ff3431..5da360b69ab 100644 --- a/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go +++ 
b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go @@ -147,7 +147,7 @@ var ( IncompatibleBucketLayoutInBinOpWarning = fmt.Errorf("%w: incompatible bucket layout encountered for binary operator", PromQLWarning) PossibleNonCounterInfo = fmt.Errorf("%w: metric might not be a counter, name does not end in _total/_sum/_count/_bucket:", PromQLInfo) - PossibleNonCounterLabelInfo = fmt.Errorf("%w: metric might not be a counter, __type__ label is not set to %q or %q", PromQLInfo, model.MetricTypeCounter, model.MetricTypeHistogram) + PossibleNonCounterLabelInfo = fmt.Errorf("%w: metric might not be a counter, __type__ label is not set to %q", PromQLInfo, model.MetricTypeCounter) HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name", PromQLInfo) IncompatibleTypesInBinOpInfo = fmt.Errorf("%w: incompatible sample types encountered for binary operator", PromQLInfo) HistogramIgnoredInAggregationInfo = fmt.Errorf("%w: ignored histogram in", PromQLInfo) diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go index 4f3926a2ea8..180be686122 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go @@ -544,7 +544,8 @@ func extractQueryOpts(r *http.Request) (promql.QueryOpts, error) { duration = parsedDuration } - return promql.NewPrometheusQueryOpts(r.FormValue("stats") == "all", duration), nil + //nolint:staticcheck + return promql.NewPrometheusQueryOpts(r.FormValue("stats") == "all", duration, model.NameValidationScheme), nil } func (api *API) queryRange(r *http.Request) (result apiFuncResult) { diff --git a/vendor/go.opentelemetry.io/contrib/bridges/prometheus/producer.go b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/producer.go 
index f3838d17ba0..37475c3d6df 100644 --- a/vendor/go.opentelemetry.io/contrib/bridges/prometheus/producer.go +++ b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/producer.go @@ -13,7 +13,6 @@ import ( "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/model" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" @@ -43,8 +42,7 @@ type producer struct { func NewMetricProducer(opts ...Option) metric.Producer { cfg := newConfig(opts...) return &producer{ - // TODO: Parameterize name validation scheme. - gatherers: prometheus.NewGatherers(cfg.gatherers, model.UTF8Validation), + gatherers: cfg.gatherers, } } @@ -52,18 +50,18 @@ func (p *producer) Produce(context.Context) ([]metricdata.ScopeMetrics, error) { now := time.Now() var errs multierr otelMetrics := make([]metricdata.Metrics, 0) - p.gatherers.Range(func(gatherer prometheus.Gatherer) { + for _, gatherer := range p.gatherers { promMetrics, err := gatherer.Gather() if err != nil { errs = append(errs, err) - return + continue } m, err := convertPrometheusMetricsInto(promMetrics, now) otelMetrics = append(otelMetrics, m...) 
if err != nil { errs = append(errs, err) } - }) + } if errs.errOrNil() != nil { otel.Handle(errs.errOrNil()) } diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go index b7b512cd203..ceb2d63e2a9 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go @@ -25,7 +25,6 @@ type config struct { disableScopeInfo bool namespace string resourceAttributesFilter attribute.Filter - validationScheme model.ValidationScheme } var logDeprecatedLegacyScheme = sync.OnceFunc(func() { @@ -126,8 +125,9 @@ func WithoutCounterSuffixes() Option { }) } -// WithoutScopeInfo configures the Exporter to not export -// labels about Instrumentation Scope to all metric points. +// WithoutScopeInfo configures the Exporter to not export the otel_scope_info metric. +// If not specified, the Exporter will create a otel_scope_info metric containing +// the metrics' Instrumentation Scope, and also add labels about Instrumentation Scope to all metric points. func WithoutScopeInfo() Option { return optionFunc(func(cfg config) config { cfg.disableScopeInfo = true @@ -136,11 +136,11 @@ func WithoutScopeInfo() Option { } // WithNamespace configures the Exporter to prefix metric with the given namespace. -// Metadata metrics such as target_info are not prefixed since these +// Metadata metrics such as target_info and otel_scope_info are not prefixed since these // have special behavior based on their name. -func WithNamespace(ns string, validationScheme model.ValidationScheme) Option { +func WithNamespace(ns string) Option { return optionFunc(func(cfg config) config { - if validationScheme != model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme. + if model.NameValidationScheme != model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme. 
logDeprecatedLegacyScheme() // Only sanitize if prometheus does not support UTF-8. ns = model.EscapeName(ns, model.NameEscapingScheme) @@ -166,12 +166,3 @@ func WithResourceAsConstantLabels(resourceFilter attribute.Filter) Option { return cfg }) } - -// WithValidationScheme configures the Exporter to validate label and metric names -// according to this scheme. Defaults to UTF8Validation. -func WithValidationScheme(scheme model.ValidationScheme) Option { - return optionFunc(func(cfg config) config { - cfg.validationScheme = scheme - return cfg - }) -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go index 61482a3c0dd..e0959641caf 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go @@ -21,6 +21,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" @@ -30,20 +31,25 @@ const ( targetInfoMetricName = "target_info" targetInfoDescription = "Target metadata" - scopeLabelPrefix = "otel_scope_" - scopeNameLabel = scopeLabelPrefix + "name" - scopeVersionLabel = scopeLabelPrefix + "version" - scopeSchemaLabel = scopeLabelPrefix + "schema_url" + scopeInfoMetricName = "otel_scope_info" + scopeInfoDescription = "Instrumentation Scope metadata" + + scopeNameLabel = "otel_scope_name" + scopeVersionLabel = "otel_scope_version" traceIDExemplarKey = "trace_id" spanIDExemplarKey = "span_id" ) -var metricsPool = sync.Pool{ - New: func() interface{} { - return &metricdata.ResourceMetrics{} - }, -} +var ( + errScopeInvalid = errors.New("invalid scope") + + metricsPool = sync.Pool{ + New: func() interface{} { + return &metricdata.ResourceMetrics{} + }, + } +) 
// Exporter is a Prometheus Exporter that embeds the OTel metric.Reader // interface for easy instantiation with a MeterProvider. @@ -91,9 +97,10 @@ type collector struct { mu sync.Mutex // mu protects all members below from the concurrent access. disableTargetInfo bool targetInfo prometheus.Metric + scopeInfos map[instrumentation.Scope]prometheus.Metric + scopeInfosInvalid map[instrumentation.Scope]struct{} metricFamilies map[string]*dto.MetricFamily resourceKeyVals keyVals - validationScheme model.ValidationScheme } // prometheus counters MUST have a _total suffix by default: @@ -115,10 +122,11 @@ func New(opts ...Option) (*Exporter, error) { withoutUnits: cfg.withoutUnits, withoutCounterSuffixes: cfg.withoutCounterSuffixes, disableScopeInfo: cfg.disableScopeInfo, + scopeInfos: make(map[instrumentation.Scope]prometheus.Metric), + scopeInfosInvalid: make(map[instrumentation.Scope]struct{}), metricFamilies: make(map[string]*dto.MetricFamily), namespace: cfg.namespace, resourceAttributesFilter: cfg.resourceAttributesFilter, - validationScheme: cfg.validationScheme, } if err := cfg.registerer.Register(collector); err != nil { @@ -166,7 +174,7 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { defer c.mu.Unlock() if c.targetInfo == nil && !c.disableTargetInfo { - targetInfo, err := createInfoMetric(targetInfoMetricName, targetInfoDescription, metrics.Resource, c.validationScheme) + targetInfo, err := createInfoMetric(targetInfoMetricName, targetInfoDescription, metrics.Resource) if err != nil { // If the target info metric is invalid, disable sending it. 
c.disableTargetInfo = true @@ -194,15 +202,20 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { } if !c.disableScopeInfo { - kv.keys = append(kv.keys, scopeNameLabel, scopeVersionLabel, scopeSchemaLabel) - kv.vals = append(kv.vals, scopeMetrics.Scope.Name, scopeMetrics.Scope.Version, scopeMetrics.Scope.SchemaURL) - - attrKeys, attrVals := getAttrs(scopeMetrics.Scope.Attributes, c.validationScheme) - for i := range attrKeys { - attrKeys[i] = scopeLabelPrefix + attrKeys[i] + scopeInfo, err := c.scopeInfo(scopeMetrics.Scope) + if errors.Is(err, errScopeInvalid) { + // Do not report the same error multiple times. + continue } - kv.keys = append(kv.keys, attrKeys...) - kv.vals = append(kv.vals, attrVals...) + if err != nil { + otel.Handle(err) + continue + } + + ch <- scopeInfo + + kv.keys = append(kv.keys, scopeNameLabel, scopeVersionLabel) + kv.vals = append(kv.vals, scopeMetrics.Scope.Name, scopeMetrics.Scope.Version) } kv.keys = append(kv.keys, c.resourceKeyVals.keys...) @@ -226,131 +239,57 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { switch v := m.Data.(type) { case metricdata.Histogram[int64]: - addHistogramMetric(ch, v, m, name, kv, c.validationScheme) + addHistogramMetric(ch, v, m, name, kv) case metricdata.Histogram[float64]: - addHistogramMetric(ch, v, m, name, kv, c.validationScheme) + addHistogramMetric(ch, v, m, name, kv) case metricdata.ExponentialHistogram[int64]: - addExponentialHistogramMetric(ch, v, m, name, kv, c.validationScheme) + addExponentialHistogramMetric(ch, v, m, name, kv) case metricdata.ExponentialHistogram[float64]: - addExponentialHistogramMetric(ch, v, m, name, kv, c.validationScheme) + addExponentialHistogramMetric(ch, v, m, name, kv) case metricdata.Sum[int64]: - addSumMetric(ch, v, m, name, kv, c.validationScheme) + addSumMetric(ch, v, m, name, kv) case metricdata.Sum[float64]: - addSumMetric(ch, v, m, name, kv, c.validationScheme) + addSumMetric(ch, v, m, name, kv) case metricdata.Gauge[int64]: - 
addGaugeMetric(ch, v, m, name, kv, c.validationScheme) + addGaugeMetric(ch, v, m, name, kv) case metricdata.Gauge[float64]: - addGaugeMetric(ch, v, m, name, kv, c.validationScheme) + addGaugeMetric(ch, v, m, name, kv) } } } } -// downscaleExponentialBucket re-aggregates bucket counts when downscaling to a coarser resolution. -func downscaleExponentialBucket(bucket metricdata.ExponentialBucket, scaleDelta int32) metricdata.ExponentialBucket { - if len(bucket.Counts) == 0 || scaleDelta < 1 { - return metricdata.ExponentialBucket{ - Offset: bucket.Offset >> scaleDelta, - Counts: append([]uint64(nil), bucket.Counts...), // copy slice - } - } - - // The new offset is scaled down - newOffset := bucket.Offset >> scaleDelta - - // Pre-calculate the new bucket count to avoid growing slice - // Each group of 2^scaleDelta buckets will merge into one bucket - //nolint:gosec // Length is bounded by slice allocation - lastBucketIdx := bucket.Offset + int32(len(bucket.Counts)) - 1 - lastNewIdx := lastBucketIdx >> scaleDelta - newBucketCount := int(lastNewIdx - newOffset + 1) - - if newBucketCount <= 0 { - return metricdata.ExponentialBucket{ - Offset: newOffset, - Counts: []uint64{}, - } - } - - newCounts := make([]uint64, newBucketCount) - - // Merge buckets according to the scale difference - for i, count := range bucket.Counts { - if count == 0 { - continue - } - - // Calculate which new bucket this count belongs to - //nolint:gosec // Index is bounded by loop iteration - originalIdx := bucket.Offset + int32(i) - newIdx := originalIdx >> scaleDelta - - // Calculate the position in the new counts array - position := newIdx - newOffset - //nolint:gosec // Length is bounded by allocation - if position >= 0 && position < int32(len(newCounts)) { - newCounts[position] += count - } - } - - return metricdata.ExponentialBucket{ - Offset: newOffset, - Counts: newCounts, - } -} - func addExponentialHistogramMetric[N int64 | float64]( ch chan<- prometheus.Metric, histogram 
metricdata.ExponentialHistogram[N], m metricdata.Metrics, name string, kv keyVals, - validationScheme model.ValidationScheme, ) { for _, dp := range histogram.DataPoints { - keys, values := getAttrs(dp.Attributes, validationScheme) + keys, values := getAttrs(dp.Attributes) keys = append(keys, kv.keys...) values = append(values, kv.vals...) desc := prometheus.NewDesc(name, m.Description, keys, nil) - // Prometheus native histograms support scales in the range [-4, 8] - scale := dp.Scale - if scale < -4 { - // Reject scales below -4 as they cannot be represented in Prometheus - otel.Handle(fmt.Errorf( - "exponential histogram scale %d is below minimum supported scale -4, skipping data point", - scale)) - continue - } - - // If scale > 8, we need to downscale the buckets to match the clamped scale - positiveBucket := dp.PositiveBucket - negativeBucket := dp.NegativeBucket - if scale > 8 { - scaleDelta := scale - 8 - positiveBucket = downscaleExponentialBucket(dp.PositiveBucket, scaleDelta) - negativeBucket = downscaleExponentialBucket(dp.NegativeBucket, scaleDelta) - scale = 8 - } - // From spec: note that Prometheus Native Histograms buckets are indexed by upper boundary while Exponential Histograms are indexed by lower boundary, the result being that the Offset fields are different-by-one. positiveBuckets := make(map[int]int64) - for i, c := range positiveBucket.Counts { + for i, c := range dp.PositiveBucket.Counts { if c > math.MaxInt64 { otel.Handle(fmt.Errorf("positive count %d is too large to be represented as int64", c)) continue } - positiveBuckets[int(positiveBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above. + positiveBuckets[int(dp.PositiveBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above. 
} negativeBuckets := make(map[int]int64) - for i, c := range negativeBucket.Counts { + for i, c := range dp.NegativeBucket.Counts { if c > math.MaxInt64 { otel.Handle(fmt.Errorf("negative count %d is too large to be represented as int64", c)) continue } - negativeBuckets[int(negativeBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above. + negativeBuckets[int(dp.NegativeBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above. } m, err := prometheus.NewConstNativeHistogram( @@ -360,7 +299,7 @@ func addExponentialHistogramMetric[N int64 | float64]( positiveBuckets, negativeBuckets, dp.ZeroCount, - scale, + dp.Scale, dp.ZeroThreshold, dp.StartTime, values...) @@ -380,10 +319,9 @@ func addHistogramMetric[N int64 | float64]( m metricdata.Metrics, name string, kv keyVals, - validationScheme model.ValidationScheme, ) { for _, dp := range histogram.DataPoints { - keys, values := getAttrs(dp.Attributes, validationScheme) + keys, values := getAttrs(dp.Attributes) keys = append(keys, kv.keys...) values = append(values, kv.vals...) @@ -411,7 +349,6 @@ func addSumMetric[N int64 | float64]( m metricdata.Metrics, name string, kv keyVals, - validationScheme model.ValidationScheme, ) { valueType := prometheus.CounterValue if !sum.IsMonotonic { @@ -419,7 +356,7 @@ func addSumMetric[N int64 | float64]( } for _, dp := range sum.DataPoints { - keys, values := getAttrs(dp.Attributes, validationScheme) + keys, values := getAttrs(dp.Attributes) keys = append(keys, kv.keys...) values = append(values, kv.vals...) @@ -444,10 +381,9 @@ func addGaugeMetric[N int64 | float64]( m metricdata.Metrics, name string, kv keyVals, - validationScheme model.ValidationScheme, ) { for _, dp := range gauge.DataPoints { - keys, values := getAttrs(dp.Attributes, validationScheme) + keys, values := getAttrs(dp.Attributes) keys = append(keys, kv.keys...) values = append(values, kv.vals...) 
@@ -463,12 +399,12 @@ func addGaugeMetric[N int64 | float64]( // getAttrs converts the attribute.Set to two lists of matching Prometheus-style // keys and values. -func getAttrs(attrs attribute.Set, validationScheme model.ValidationScheme) ([]string, []string) { +func getAttrs(attrs attribute.Set) ([]string, []string) { keys := make([]string, 0, attrs.Len()) values := make([]string, 0, attrs.Len()) itr := attrs.Iter() - if validationScheme == model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme. + if model.NameValidationScheme == model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme. // Do not perform sanitization if prometheus supports UTF-8. for itr.Next() { kv := itr.Attribute() @@ -498,17 +434,21 @@ func getAttrs(attrs attribute.Set, validationScheme model.ValidationScheme) ([]s return keys, values } -func createInfoMetric(name, description string, res *resource.Resource, validationScheme model.ValidationScheme) (prometheus.Metric, error) { - keys, values := getAttrs(*res.Set(), validationScheme) +func createInfoMetric(name, description string, res *resource.Resource) (prometheus.Metric, error) { + keys, values := getAttrs(*res.Set()) desc := prometheus.NewDesc(name, description, keys, nil) return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...) } -func unitMapGetOrDefault(unit string) string { - if promUnit, ok := unitSuffixes[unit]; ok { - return promUnit - } - return unit +func createScopeInfoMetric(scope instrumentation.Scope) (prometheus.Metric, error) { + attrs := make([]attribute.KeyValue, 0, scope.Attributes.Len()+2) // resource attrs + scope name + scope version + attrs = append(attrs, scope.Attributes.ToSlice()...) 
+ attrs = append(attrs, attribute.String(scopeNameLabel, scope.Name)) + attrs = append(attrs, attribute.String(scopeVersionLabel, scope.Version)) + + keys, values := getAttrs(attribute.NewSet(attrs...)) + desc := prometheus.NewDesc(scopeInfoMetricName, scopeInfoDescription, keys, nil) + return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...) } var unitSuffixes = map[string]string{ @@ -550,7 +490,7 @@ var unitSuffixes = map[string]string{ // getName returns the sanitized name, prefixed with the namespace and suffixed with unit. func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string { name := m.Name - if c.validationScheme != model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme. + if model.NameValidationScheme != model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme. // Only sanitize if prometheus does not support UTF-8. logDeprecatedLegacyScheme() name = model.EscapeName(name, model.NameEscapingScheme) @@ -569,7 +509,7 @@ func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string { if c.namespace != "" { name = c.namespace + name } - if suffix := unitMapGetOrDefault(m.Unit); suffix != "" && !c.withoutUnits && !strings.HasSuffix(name, suffix) { + if suffix, ok := unitSuffixes[m.Unit]; ok && !c.withoutUnits && !strings.HasSuffix(name, suffix) { name += "_" + suffix } if addCounterSuffix { @@ -612,10 +552,34 @@ func (c *collector) createResourceAttributes(res *resource.Resource) { defer c.mu.Unlock() resourceAttrs, _ := res.Set().Filter(c.resourceAttributesFilter) - resourceKeys, resourceValues := getAttrs(resourceAttrs, c.validationScheme) + resourceKeys, resourceValues := getAttrs(resourceAttrs) c.resourceKeyVals = keyVals{keys: resourceKeys, vals: resourceValues} } +func (c *collector) scopeInfo(scope instrumentation.Scope) (prometheus.Metric, error) { + c.mu.Lock() + defer c.mu.Unlock() + + 
scopeInfo, ok := c.scopeInfos[scope] + if ok { + return scopeInfo, nil + } + + if _, ok := c.scopeInfosInvalid[scope]; ok { + return nil, errScopeInvalid + } + + scopeInfo, err := createScopeInfoMetric(scope) + if err != nil { + c.scopeInfosInvalid[scope] = struct{}{} + return nil, fmt.Errorf("cannot create scope info metric: %w", err) + } + + c.scopeInfos[scope] = scopeInfo + + return scopeInfo, nil +} + func (c *collector) validateMetrics(name, description string, metricType *dto.MetricType) (drop bool, help string) { c.mu.Lock() defer c.mu.Unlock() @@ -670,8 +634,7 @@ func addExemplars[N int64 | float64](m prometheus.Metric, exemplars []metricdata Labels: labels, } } - // TODO: Parameterize name validation scheme. - metricWithExemplar, err := newMetricWithExemplars(m, model.UTF8Validation, promExemplars...) + metricWithExemplar, err := prometheus.NewMetricWithExemplars(m, promExemplars...) if err != nil { // If there are errors creating the metric with exemplars, just warn // and return the metric without exemplars. diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter_globalvalidationscheme.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter_globalvalidationscheme.go deleted file mode 100644 index 0d71a9e8677..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter_globalvalidationscheme.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -//go:build !localvalidationscheme - -package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus" - -import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/model" -) - -func newMetricWithExemplars(m prometheus.Metric, scheme model.ValidationScheme, exemplars ...prometheus.Exemplar) (prometheus.Metric, error) { - return prometheus.NewMetricWithExemplars(m, exemplars...) 
-} diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter_localvalidationscheme.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter_localvalidationscheme.go deleted file mode 100644 index d8a61093470..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter_localvalidationscheme.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -//go:build localvalidationscheme - -package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus" - -import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/model" -) - -func newMetricWithExemplars(m prometheus.Metric, scheme model.ValidationScheme, exemplars ...prometheus.Exemplar) (prometheus.Metric, error) { - return prometheus.NewMetricWithExemplars(m, scheme, exemplars...) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go index 0a48aed74dd..ebb9a0463b3 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go @@ -202,7 +202,7 @@ func (r *PeriodicReader) aggregation( // collectAndExport gather all metric data related to the periodicReader r from // the SDK and exports it with r's exporter. func (r *PeriodicReader) collectAndExport(ctx context.Context) error { - ctx, cancel := context.WithTimeoutCause(ctx, r.timeout, errors.New("reader collect and export timeout")) + ctx, cancel := context.WithTimeout(ctx, r.timeout) defer cancel() // TODO (#3047): Use a sync.Pool or persistent pointer instead of allocating rm every Collect. @@ -278,7 +278,7 @@ func (r *PeriodicReader) ForceFlush(ctx context.Context) error { // Prioritize the ctx timeout if it is set. 
if _, ok := ctx.Deadline(); !ok { var cancel context.CancelFunc - ctx, cancel = context.WithTimeoutCause(ctx, r.timeout, errors.New("reader force flush timeout")) + ctx, cancel = context.WithTimeout(ctx, r.timeout) defer cancel() } @@ -311,7 +311,7 @@ func (r *PeriodicReader) Shutdown(ctx context.Context) error { // Prioritize the ctx timeout if it is set. if _, ok := ctx.Deadline(); !ok { var cancel context.CancelFunc - ctx, cancel = context.WithTimeoutCause(ctx, r.timeout, errors.New("reader shutdown timeout")) + ctx, cancel = context.WithTimeout(ctx, r.timeout) defer cancel() } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go index 7bdb699cae0..2240c26e9b4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go @@ -121,14 +121,6 @@ func (p *pipeline) addMultiCallback(c multiCallback) (unregister func()) { // // This method is safe to call concurrently. func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) error { - // Only check if context is already cancelled before starting, not inside or after callback loops. - // If this method returns after executing some callbacks but before running all aggregations, - // internal aggregation state can be corrupted and result in incorrect data returned - // by future produce calls. - if err := ctx.Err(); err != nil { - return err - } - p.Lock() defer p.Unlock() @@ -138,6 +130,12 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) if e := c(ctx); e != nil { err = errors.Join(err, e) } + if err := ctx.Err(); err != nil { + rm.Resource = nil + clear(rm.ScopeMetrics) // Erase elements to let GC collect objects. + rm.ScopeMetrics = rm.ScopeMetrics[:0] + return err + } } for e := p.multiCallbacks.Front(); e != nil; e = e.Next() { // TODO make the callbacks parallel. 
( #3034 ) @@ -145,6 +143,13 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) if e := f(ctx); e != nil { err = errors.Join(err, e) } + if err := ctx.Err(); err != nil { + // This means the context expired before we finished running callbacks. + rm.Resource = nil + clear(rm.ScopeMetrics) // Erase elements to let GC collect objects. + rm.ScopeMetrics = rm.ScopeMetrics[:0] + return err + } } rm.Resource = p.resource diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go index 0e5adc1a766..cda142c7ea2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go @@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" // version is the current release version of the metric SDK in use. func version() string { - return "1.37.0" + return "1.36.0" } diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 425ec574d65..c9455b50b9b 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "0.239.0" +const Version = "0.238.0" diff --git a/vendor/modules.txt b/vendor/modules.txt index 6bf73df1dea..53dd6d84475 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1169,7 +1169,7 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint/validations # github.com/prometheus/client_model v0.6.2 ## explicit; go 1.22.0 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.65.1-0.20250714091050-c6ae72fb63e9 +# github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3 ## explicit; go 1.23.0 github.com/prometheus/common/config github.com/prometheus/common/expfmt @@ -1192,7 +1192,7 @@ github.com/prometheus/otlptranslator github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v1.99.0 => github.com/grafana/mimir-prometheus v1.8.2-0.20250725113505-6dd7af9abc56 +# github.com/prometheus/prometheus v1.99.0 => github.com/grafana/mimir-prometheus v1.8.2-0.20250717103207-acaa45ca4d40 ## explicit; go 1.23.0 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -1509,7 +1509,7 @@ go.opentelemetry.io/collector/semconv/v1.6.1 # go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 ## explicit; go 1.23.0 go.opentelemetry.io/contrib/bridges/otelzap -# go.opentelemetry.io/contrib/bridges/prometheus v0.61.0 => github.com/aknuds1/opentelemetry-go-contrib/bridges/prometheus v0.0.0-20250716061915-e4a04e1efdd8 +# go.opentelemetry.io/contrib/bridges/prometheus v0.61.0 ## explicit; go 1.23.0 go.opentelemetry.io/contrib/bridges/prometheus # go.opentelemetry.io/contrib/detectors/gcp v1.36.0 @@ -1540,7 +1540,7 @@ go.opentelemetry.io/contrib/propagators/jaeger ## explicit; go 1.23.0 go.opentelemetry.io/contrib/samplers/jaegerremote go.opentelemetry.io/contrib/samplers/jaegerremote/internal/utils -# go.opentelemetry.io/otel v1.37.0 => github.com/aknuds1/opentelemetry-go v0.0.0-20250714105753-6d10dabef4d5 +# 
go.opentelemetry.io/otel v1.37.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -1608,7 +1608,7 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry -# go.opentelemetry.io/otel/exporters/prometheus v0.58.0 => github.com/aknuds1/opentelemetry-go/exporters/prometheus v0.0.0-20250714105753-6d10dabef4d5 +# go.opentelemetry.io/otel/exporters/prometheus v0.58.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/prometheus # go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2 @@ -1646,7 +1646,7 @@ go.opentelemetry.io/otel/sdk/trace/tracetest # go.opentelemetry.io/otel/sdk/log v0.12.2 ## explicit; go 1.23.0 go.opentelemetry.io/otel/sdk/log -# go.opentelemetry.io/otel/sdk/metric v1.37.0 +# go.opentelemetry.io/otel/sdk/metric v1.36.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/sdk/metric go.opentelemetry.io/otel/sdk/metric/exemplar @@ -1798,7 +1798,7 @@ golang.org/x/tools/internal/stdlib golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions -# google.golang.org/api v0.239.0 +# google.golang.org/api v0.238.0 ## explicit; go 1.23.0 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport @@ -2112,7 +2112,7 @@ sigs.k8s.io/kustomize/kyaml/yaml/walk sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 sigs.k8s.io/yaml/goyaml.v3 -# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v1.8.2-0.20250725113505-6dd7af9abc56 +# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v1.8.2-0.20250717103207-acaa45ca4d40 # github.com/prometheus/alertmanager => github.com/juliusmh/alertmanager v0.26.1-0.20250624114102-96969065f8d2 # 
github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20250428154222-f7d51a6f6700 # gopkg.in/yaml.v3 => github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094 @@ -2122,6 +2122,3 @@ sigs.k8s.io/yaml/goyaml.v3 # github.com/opentracing-contrib/go-grpc => github.com/charleskorn/go-grpc v0.0.0-20231024023642-e9298576254f # github.com/prometheus/otlptranslator => github.com/grafana/mimir-otlptranslator v0.0.0-20250703083430-c31a9568ad96 # github.com/thanos-io/objstore => github.com/charleskorn/objstore v0.0.0-20250527065533-21d4c0c463eb -# go.opentelemetry.io/contrib/bridges/prometheus => github.com/aknuds1/opentelemetry-go-contrib/bridges/prometheus v0.0.0-20250716061915-e4a04e1efdd8 -# go.opentelemetry.io/otel/exporters/prometheus => github.com/aknuds1/opentelemetry-go/exporters/prometheus v0.0.0-20250714105753-6d10dabef4d5 -# go.opentelemetry.io/otel => github.com/aknuds1/opentelemetry-go v0.0.0-20250714105753-6d10dabef4d5 From 2347f899056ee865e89648e40cbba58b2235175c Mon Sep 17 00:00:00 2001 From: Julius Hinze Date: Mon, 21 Jul 2025 11:55:20 +0200 Subject: [PATCH 03/10] chore: fix failing tests --- pkg/mimirtool/commands/analyse_rulefiles_test.go | 2 +- pkg/mimirtool/commands/analyse_rules_test.go | 3 ++- pkg/mimirtool/rules/parser_test.go | 3 ++- pkg/streamingpromql/planning.go | 11 ++++++----- pkg/util/validation/limits_test.go | 1 + .../main.go | 3 ++- 6 files changed, 14 insertions(+), 9 deletions(-) diff --git a/pkg/mimirtool/commands/analyse_rulefiles_test.go b/pkg/mimirtool/commands/analyse_rulefiles_test.go index fdf9d4edd3e..7a3948ce989 100644 --- a/pkg/mimirtool/commands/analyse_rulefiles_test.go +++ b/pkg/mimirtool/commands/analyse_rulefiles_test.go @@ -10,7 +10,7 @@ import ( ) func TestAnalyzeRuleFiles(t *testing.T) { - mir, err := AnalyzeRuleFiles([]string{"testdata/prometheus_rules.yaml"}) + mir, err := AnalyzeRuleFiles([]string{"testdata/prometheus_rules.yaml"}, model.UTF8Validation) require.NoError(t, err) 
require.Equal(t, 28, len(mir.MetricsUsed)) expectedMetrics := model.LabelValues{ diff --git a/pkg/mimirtool/commands/analyse_rules_test.go b/pkg/mimirtool/commands/analyse_rules_test.go index 6dda7489232..de345b99097 100644 --- a/pkg/mimirtool/commands/analyse_rules_test.go +++ b/pkg/mimirtool/commands/analyse_rules_test.go @@ -9,6 +9,7 @@ import ( "slices" "testing" + "github.com/prometheus/common/model" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -56,7 +57,7 @@ func TestParseMetricsInRuleFile(t *testing.T) { output := &analyze.MetricsInRuler{} output.OverallMetrics = make(map[string]struct{}) - nss, err := rules.ParseFiles("mimir", []string{"testdata/prometheus_rules.yaml"}) + nss, err := rules.ParseFiles("mimir", []string{"testdata/prometheus_rules.yaml"}, model.UTF8Validation) require.NoError(t, err) for _, ns := range nss { diff --git a/pkg/mimirtool/rules/parser_test.go b/pkg/mimirtool/rules/parser_test.go index f8aa220f87a..cceb04d455e 100644 --- a/pkg/mimirtool/rules/parser_test.go +++ b/pkg/mimirtool/rules/parser_test.go @@ -9,6 +9,7 @@ import ( "fmt" "testing" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/rulefmt" "github.com/grafana/mimir/pkg/mimirtool/rules/rwrulefmt" @@ -123,7 +124,7 @@ func TestParseFiles(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := ParseFiles(tt.backend, tt.files) + got, err := ParseFiles(tt.backend, tt.files, model.UTF8Validation) if (err != nil) != tt.wantErr { t.Errorf("ParseFiles() error = %v, wantErr %v", err, tt.wantErr) return diff --git a/pkg/streamingpromql/planning.go b/pkg/streamingpromql/planning.go index e7390b6f011..7f87e4eea47 100644 --- a/pkg/streamingpromql/planning.go +++ b/pkg/streamingpromql/planning.go @@ -126,13 +126,14 @@ func (p *QueryPlanner) NewQueryPlan(ctx context.Context, qs string, timeRange ty } expr, err = p.runASTStage("Pre-processing", observer, func() (parser.Expr, error) { - start, end 
:= timestamp.Time(timeRange.StartT), timestamp.Time(timeRange.EndT) - interval := time.Duration(timeRange.IntervalMilliseconds) * time.Millisecond + step := time.Duration(timeRange.IntervalMilliseconds) * time.Millisecond + if timeRange.IsInstant { - // Prometheus expects interval to be zero for instant queries but we use 1. - interval = 0 + // timeRange.IntervalMilliseconds is 1 for instant queries, but we need to pass 0 for instant queries to PreprocessExpr. + step = 0 } - return promql.PreprocessExpr(expr, start, end, interval) + + return promql.PreprocessExpr(expr, timestamp.Time(timeRange.StartT), timestamp.Time(timeRange.EndT), step) }) if err != nil { diff --git a/pkg/util/validation/limits_test.go b/pkg/util/validation/limits_test.go index a5c5072cd7b..bcbeeced255 100644 --- a/pkg/util/validation/limits_test.go +++ b/pkg/util/validation/limits_test.go @@ -163,6 +163,7 @@ metric_relabel_configs: require.NoError(t, err) exp.Regex = regex exp.SourceLabels = model.LabelNames([]model.LabelName{"le"}) + exp.MetricNameValidationScheme = model.UTF8Validation l := Limits{} dec := yaml.NewDecoder(strings.NewReader(inp)) diff --git a/tools/check-for-disabled-but-supported-mqe-test-cases/main.go b/tools/check-for-disabled-but-supported-mqe-test-cases/main.go index b59849f8493..64cf37af051 100644 --- a/tools/check-for-disabled-but-supported-mqe-test-cases/main.go +++ b/tools/check-for-disabled-but-supported-mqe-test-cases/main.go @@ -16,6 +16,7 @@ import ( "github.com/go-kit/log" "github.com/grafana/regexp" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" @@ -58,7 +59,7 @@ func run() error { } opts := streamingpromql.NewTestEngineOpts() - engine, err := streamingpromql.NewEngine(opts, streamingpromql.NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), streamingpromql.NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := 
streamingpromql.NewEngine(opts, streamingpromql.NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), streamingpromql.NewQueryPlanner(opts), log.NewNopLogger()) if err != nil { return fmt.Errorf("could not create engine: %w", err) } From e8ab43d7f6c77d11cd18b3aba7decafdde36d776 Mon Sep 17 00:00:00 2001 From: Julius Hinze Date: Mon, 21 Jul 2025 11:56:19 +0200 Subject: [PATCH 04/10] streamingpromql: use label/metric name validation scheme from limits (rather than NameValidatingEngine) --- .../querysharding_test_utils_test.go | 2 +- pkg/mimir/modules.go | 9 ++-- pkg/querier/querier.go | 24 +++++++-- .../benchmarks/comparison_test.go | 7 +-- pkg/streamingpromql/engine.go | 11 +++- .../engine_concurrency_test.go | 3 +- pkg/streamingpromql/engine_test.go | 50 +++++++++---------- pkg/streamingpromql/functions_test.go | 3 +- pkg/streamingpromql/query.go | 7 ++- 9 files changed, 74 insertions(+), 42 deletions(-) diff --git a/pkg/frontend/querymiddleware/querysharding_test_utils_test.go b/pkg/frontend/querymiddleware/querysharding_test_utils_test.go index 543148ce81a..0e55f1c375e 100644 --- a/pkg/frontend/querymiddleware/querysharding_test_utils_test.go +++ b/pkg/frontend/querymiddleware/querysharding_test_utils_test.go @@ -333,7 +333,7 @@ func newEngineForTesting(t *testing.T, engine string, opts ...engineOpt) (promql case querier.PrometheusEngine: return promOpts, promql.NewEngine(promOpts) case querier.MimirEngine: - limits := streamingpromql.NewStaticQueryLimitsProvider(0) + limits := streamingpromql.NewStaticQueryLimitsProvider(0, model.UTF8Validation) metrics := stats.NewQueryMetrics(promOpts.Reg) planner := streamingpromql.NewQueryPlanner(mqeOpts) logger := log.NewNopLogger() diff --git a/pkg/mimir/modules.go b/pkg/mimir/modules.go index 31294c1de83..0920a66e8ee 100644 --- a/pkg/mimir/modules.go +++ b/pkg/mimir/modules.go @@ -28,6 +28,7 @@ import ( "github.com/prometheus/alertmanager/matchers/compat" 
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/rules" @@ -805,9 +806,10 @@ func (t *Mimir) initQueryFrontendTripperware() (serv services.Service, err error var eng promql.QueryEngine switch t.Cfg.Frontend.QueryEngine { case querier.PrometheusEngine: - eng = promql.NewEngine(promOpts) + // TODO: Decide whether this is a good idea. + eng = streamingpromqlcompat.NameValidatingEngine(promql.NewEngine(promOpts), t.Overrides) case querier.MimirEngine: - streamingEngine, err := streamingpromql.NewEngine(mqeOpts, streamingpromql.NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(mqeOpts.CommonOpts.Reg), t.QueryPlanner, util_log.Logger) + streamingEngine, err := streamingpromql.NewEngine(mqeOpts, streamingpromql.NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(mqeOpts.CommonOpts.Reg), t.QueryPlanner, util_log.Logger) if err != nil { return nil, fmt.Errorf("unable to create Mimir Query Engine: %w", err) } @@ -821,9 +823,6 @@ func (t *Mimir) initQueryFrontendTripperware() (serv services.Service, err error panic(fmt.Sprintf("invalid config not caught by validation: unknown PromQL engine '%s'", t.Cfg.Querier.QueryEngine)) } - // TODO: Decide whether this is a good idea. - eng = streamingpromqlcompat.NameValidatingEngine(eng, t.Overrides) - tripperware, err := querymiddleware.NewTripperware( t.Cfg.Frontend.QueryMiddleware, util_log.Logger, diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 77b0a4b5d5a..ed838a52714 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -185,7 +185,8 @@ func New(cfg Config, limits *validation.Overrides, distributor Distributor, quer switch cfg.QueryEngine { case PrometheusEngine: - eng = promql.NewEngine(opts) + // TODO: Check whether this approach is a good idea. 
+ eng = compat.NameValidatingEngine(promql.NewEngine(opts), limits) case MimirEngine: limitsProvider := NewTenantQueryLimitsProvider(limits) streamingEngine, err := streamingpromql.NewEngine(mqeOpts, limitsProvider, queryMetrics, planner, logger) @@ -194,7 +195,8 @@ func New(cfg Config, limits *validation.Overrides, distributor Distributor, quer } if cfg.EnableQueryEngineFallback { - prometheusEngine := promql.NewEngine(opts) + // TODO: Check whether this approach is a good idea. + prometheusEngine := compat.NameValidatingEngine(promql.NewEngine(opts), limits) eng = compat.NewEngineWithFallback(streamingEngine, prometheusEngine, reg, logger) } else { eng = streamingEngine @@ -203,8 +205,6 @@ func New(cfg Config, limits *validation.Overrides, distributor Distributor, quer panic(fmt.Sprintf("invalid config not caught by validation: unknown PromQL engine '%s'", cfg.QueryEngine)) } - // TODO: Check whether this approach is a good idea. - eng = compat.NameValidatingEngine(eng, limits) return NewSampleAndChunkQueryable(lazyQueryable), exemplarQueryable, eng, nil } @@ -795,3 +795,19 @@ func (p *TenantQueryLimitsProvider) GetMaxEstimatedMemoryConsumptionPerQuery(ctx return totalLimit, nil } + +// GetValidationScheme computes the validation scheme for tenants injected into ctx. Returns LegacyValidation if +// at least one tenant uses LegacyValidation, UTF8Validation otherwise. 
+func (p *TenantQueryLimitsProvider) GetValidationScheme(ctx context.Context) (model.ValidationScheme, error) { + tenantIDs, err := tenant.TenantIDs(ctx) + if err != nil { + return 0, err + } + for _, tenantID := range tenantIDs { + validationScheme := p.limits.ValidationScheme(tenantID) + if validationScheme == model.LegacyValidation { + return validationScheme, nil + } + } + return model.UTF8Validation, nil +} diff --git a/pkg/streamingpromql/benchmarks/comparison_test.go b/pkg/streamingpromql/benchmarks/comparison_test.go index 2cfb59927fc..979d8b38adc 100644 --- a/pkg/streamingpromql/benchmarks/comparison_test.go +++ b/pkg/streamingpromql/benchmarks/comparison_test.go @@ -20,6 +20,7 @@ import ( "github.com/grafana/dskit/services" "github.com/grafana/dskit/test" "github.com/grafana/dskit/user" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" @@ -43,7 +44,7 @@ func BenchmarkQuery(b *testing.B) { opts := streamingpromql.NewTestEngineOpts() prometheusEngine := promql.NewEngine(opts.CommonOpts) - mimirEngine, err := streamingpromql.NewEngine(opts, streamingpromql.NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), streamingpromql.NewQueryPlanner(opts), log.NewNopLogger()) + mimirEngine, err := streamingpromql.NewEngine(opts, streamingpromql.NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), streamingpromql.NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(b, err) // Important: the names below must remain in sync with the names used in tools/benchmark-query-engine. 
@@ -95,7 +96,7 @@ func TestBothEnginesReturnSameResultsForBenchmarkQueries(t *testing.T) { opts := streamingpromql.NewTestEngineOpts() prometheusEngine := promql.NewEngine(opts.CommonOpts) - limitsProvider := streamingpromql.NewStaticQueryLimitsProvider(0) + limitsProvider := streamingpromql.NewStaticQueryLimitsProvider(0, model.UTF8Validation) queryMetrics := stats.NewQueryMetrics(nil) mimirEngine, err := streamingpromql.NewEngine(opts, limitsProvider, queryMetrics, streamingpromql.NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) @@ -124,7 +125,7 @@ func TestBenchmarkSetup(t *testing.T) { q := createBenchmarkQueryable(t, []int{1}) opts := streamingpromql.NewTestEngineOpts() - mimirEngine, err := streamingpromql.NewEngine(opts, streamingpromql.NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), streamingpromql.NewQueryPlanner(opts), log.NewNopLogger()) + mimirEngine, err := streamingpromql.NewEngine(opts, streamingpromql.NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), streamingpromql.NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) ctx := user.InjectOrgID(context.Background(), UserID) diff --git a/pkg/streamingpromql/engine.go b/pkg/streamingpromql/engine.go index 958511dcb01..998e99ef560 100644 --- a/pkg/streamingpromql/engine.go +++ b/pkg/streamingpromql/engine.go @@ -15,6 +15,7 @@ import ( "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" "go.opentelemetry.io/otel" @@ -143,25 +144,33 @@ func (e *Engine) newQueryFromPlanner(ctx context.Context, q storage.Queryable, o type QueryLimitsProvider interface { // GetMaxEstimatedMemoryConsumptionPerQuery returns the maximum estimated memory allowed to be consumed by a query in bytes, or 0 to disable the limit. 
GetMaxEstimatedMemoryConsumptionPerQuery(ctx context.Context) (uint64, error) + // GetValidationScheme returns the label/metric name validation scheme to use for a query. + GetValidationScheme(ctx context.Context) (model.ValidationScheme, error) } // NewStaticQueryLimitsProvider returns a QueryLimitsProvider that always returns the provided limits. // // This should generally only be used in tests. -func NewStaticQueryLimitsProvider(maxEstimatedMemoryConsumptionPerQuery uint64) QueryLimitsProvider { +func NewStaticQueryLimitsProvider(maxEstimatedMemoryConsumptionPerQuery uint64, validationScheme model.ValidationScheme) QueryLimitsProvider { return staticQueryLimitsProvider{ maxEstimatedMemoryConsumptionPerQuery: maxEstimatedMemoryConsumptionPerQuery, + validationScheme: validationScheme, } } type staticQueryLimitsProvider struct { maxEstimatedMemoryConsumptionPerQuery uint64 + validationScheme model.ValidationScheme } func (p staticQueryLimitsProvider) GetMaxEstimatedMemoryConsumptionPerQuery(_ context.Context) (uint64, error) { return p.maxEstimatedMemoryConsumptionPerQuery, nil } +func (p staticQueryLimitsProvider) GetValidationScheme(_ context.Context) (model.ValidationScheme, error) { + return p.validationScheme, nil +} + type NoopQueryTracker struct{} func (n *NoopQueryTracker) GetMaxConcurrent() int { diff --git a/pkg/streamingpromql/engine_concurrency_test.go b/pkg/streamingpromql/engine_concurrency_test.go index 2ba1abdcd95..8430fc68efc 100644 --- a/pkg/streamingpromql/engine_concurrency_test.go +++ b/pkg/streamingpromql/engine_concurrency_test.go @@ -10,6 +10,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/promqltest" @@ -187,7 +188,7 @@ func TestConcurrentQueries(t *testing.T) { t.Cleanup(func() { require.NoError(t, storage.Close()) }) opts := NewTestEngineOpts() - engine, err := 
NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) // Populate the expected result for each query. diff --git a/pkg/streamingpromql/engine_test.go b/pkg/streamingpromql/engine_test.go index f5e8392cc81..315ed8b3068 100644 --- a/pkg/streamingpromql/engine_test.go +++ b/pkg/streamingpromql/engine_test.go @@ -95,7 +95,7 @@ func requireQueryIsUnsupported(t *testing.T, expression string, expectedError st func requireRangeQueryIsUnsupported(t *testing.T, expression string, expectedError string) { opts := NewTestEngineOpts() - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) qry, err := engine.NewRangeQuery(context.Background(), nil, nil, expression, time.Now().Add(-time.Hour), time.Now(), time.Minute) @@ -106,7 +106,7 @@ func requireRangeQueryIsUnsupported(t *testing.T, expression string, expectedErr func requireInstantQueryIsUnsupported(t *testing.T, expression string, expectedError string) { opts := NewTestEngineOpts() - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) qry, err := engine.NewInstantQuery(context.Background(), nil, nil, expression, time.Now()) @@ -118,7 +118,7 @@ func requireInstantQueryIsUnsupported(t *testing.T, expression string, expectedE func 
TestNewRangeQuery_InvalidQueryTime(t *testing.T) { opts := NewTestEngineOpts() - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) ctx := context.Background() @@ -132,7 +132,7 @@ func TestNewRangeQuery_InvalidQueryTime(t *testing.T) { func TestNewRangeQuery_InvalidExpressionTypes(t *testing.T) { opts := NewTestEngineOpts() - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) ctx := context.Background() @@ -148,7 +148,7 @@ func TestNewInstantQuery_Strings(t *testing.T) { opts := NewTestEngineOpts() prometheusEngine := promql.NewEngine(opts.CommonOpts) - mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) storage := promqltest.LoadedStorage(t, ``) @@ -172,7 +172,7 @@ func TestNewInstantQuery_Strings(t *testing.T) { // Once the streaming engine supports all PromQL features exercised by Prometheus' test cases, we can remove these files and instead call promql.RunBuiltinTests here instead. 
func TestUpstreamTestCases(t *testing.T) { opts := NewTestEngineOpts() - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) testdataFS := os.DirFS("./testdata") @@ -196,7 +196,7 @@ func TestUpstreamTestCases(t *testing.T) { func TestOurTestCases(t *testing.T) { opts := NewTestEngineOpts() - mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) prometheusEngine := promql.NewEngine(opts.CommonOpts) @@ -239,7 +239,7 @@ func TestOurTestCases(t *testing.T) { func TestRangeVectorSelectors(t *testing.T) { opts := NewTestEngineOpts() prometheusEngine := promql.NewEngine(opts.CommonOpts) - mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) baseT := timestamp.Time(0) @@ -785,7 +785,7 @@ func TestSubqueries(t *testing.T) { opts := NewTestEngineOpts() opts.CommonOpts.EnablePerStepStats = true prometheusEngine := promql.NewEngine(opts.CommonOpts) - mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) 
storage := promqltest.LoadedStorage(t, data) @@ -1209,7 +1209,7 @@ func TestSubqueries(t *testing.T) { func TestQueryCancellation(t *testing.T) { opts := NewTestEngineOpts() - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) // Simulate the query being cancelled by another goroutine by waiting for the Select() call to be made, @@ -1237,7 +1237,7 @@ func TestQueryCancellation(t *testing.T) { func TestQueryTimeout(t *testing.T) { opts := NewTestEngineOpts() opts.CommonOpts.Timeout = 20 * time.Millisecond - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) // Simulate the query doing some work and check that the query context has been cancelled. 
@@ -1303,7 +1303,7 @@ func (w cancellationQuerier) waitForCancellation(ctx context.Context) error { func TestQueryContextCancelledOnceQueryFinished(t *testing.T) { opts := NewTestEngineOpts() - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) storage := promqltest.LoadedStorage(t, ` @@ -1513,7 +1513,7 @@ func TestMemoryConsumptionLimit_SingleQueries(t *testing.T) { opts := NewTestEngineOpts() opts.CommonOpts.Reg = reg - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(limit), stats.NewQueryMetrics(reg), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(limit, model.UTF8Validation), stats.NewQueryMetrics(reg), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) spanExporter.Reset() @@ -1632,7 +1632,7 @@ func TestMemoryConsumptionLimit_MultipleQueries(t *testing.T) { opts.CommonOpts.Reg = reg limit := 32*types.FPointSize + 4*types.SeriesMetadataSize + 3*uint64(labels.FromStrings(labels.MetricName, "some_metric", "idx", "i").ByteSize()) - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(limit), stats.NewQueryMetrics(reg), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(limit, model.UTF8Validation), stats.NewQueryMetrics(reg), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) runQuery := func(expr string, shouldSucceed bool) { @@ -1704,7 +1704,7 @@ func TestActiveQueryTracker_SuccessfulQuery(t *testing.T) { opts.CommonOpts.ActiveQueryTracker = tracker planner := NewQueryPlanner(opts) - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), planner, log.NewNopLogger()) + engine, err := NewEngine(opts, 
NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), planner, log.NewNopLogger()) require.NoError(t, err) testActiveQueryTracker( @@ -1765,7 +1765,7 @@ func TestActiveQueryTracker_FailedQuery(t *testing.T) { opts := NewTestEngineOpts() tracker := &testQueryTracker{} opts.CommonOpts.ActiveQueryTracker = tracker - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) innerStorage := promqltest.LoadedStorage(t, "") @@ -1856,7 +1856,7 @@ func TestActiveQueryTracker_WaitingForTrackerIncludesQueryTimeout(t *testing.T) opts := NewTestEngineOpts() opts.CommonOpts.Timeout = 10 * time.Millisecond opts.CommonOpts.ActiveQueryTracker = tracker - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) queryTypes := map[string]func() (promql.Query, error){ @@ -1933,7 +1933,7 @@ func runAnnotationTests(t *testing.T, testCases map[string]annotationTestCase) { endT := startT.Add(2 * step) opts := NewTestEngineOpts() - mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) prometheusEngine := promql.NewEngine(opts.CommonOpts) @@ -2948,7 +2948,7 @@ func runMixedMetricsTests(t *testing.T, expressions []string, pointsPerSeries in // - Look backs opts := NewTestEngineOpts() 
- mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) prometheusEngine := promql.NewEngine(opts.CommonOpts) @@ -3186,7 +3186,7 @@ func TestCompareVariousMixedMetricsComparisonOps(t *testing.T) { func TestQueryStats(t *testing.T) { opts := NewTestEngineOpts() opts.CommonOpts.EnablePerStepStats = true - mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) prometheusEngine := promql.NewEngine(opts.CommonOpts) @@ -3490,7 +3490,7 @@ func TestQueryStatsUpstreamTestCases(t *testing.T) { // TestCases are taken from Prometheus' TestQueryStatistics. 
opts := NewTestEngineOpts() opts.CommonOpts.EnablePerStepStats = true - mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) prometheusEngine := promql.NewEngine(opts.CommonOpts) @@ -3872,7 +3872,7 @@ func TestQueryStatsUpstreamTestCases(t *testing.T) { } func TestQueryStatementLookbackDelta(t *testing.T) { - limitsProvider := NewStaticQueryLimitsProvider(0) + limitsProvider := NewStaticQueryLimitsProvider(0, model.UTF8Validation) stats := stats.NewQueryMetrics(nil) logger := log.NewNopLogger() @@ -3939,7 +3939,7 @@ func TestQueryClose(t *testing.T) { t.Cleanup(func() { require.NoError(t, storage.Close()) }) opts := NewTestEngineOpts() - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) start := timestamp.Time(0) @@ -3971,7 +3971,7 @@ func TestEagerLoadSelectors(t *testing.T) { t.Cleanup(func() { require.NoError(t, storage.Close()) }) - limitsProvider := NewStaticQueryLimitsProvider(0) + limitsProvider := NewStaticQueryLimitsProvider(0, model.UTF8Validation) metrics := stats.NewQueryMetrics(nil) logger := log.NewNopLogger() optsWithoutEagerLoading := NewTestEngineOpts() @@ -4096,7 +4096,7 @@ func TestInstantQueryDurationExpression(t *testing.T) { opts := NewTestEngineOpts() prometheusEngine := promql.NewEngine(opts.CommonOpts) - mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + mimirEngine, err := NewEngine(opts, 
NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) ctx := context.Background() diff --git a/pkg/streamingpromql/functions_test.go b/pkg/streamingpromql/functions_test.go index 1585b91d1fc..4d63430d312 100644 --- a/pkg/streamingpromql/functions_test.go +++ b/pkg/streamingpromql/functions_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/promqltest" @@ -29,7 +30,7 @@ func TestFunctionDeduplicateAndMerge(t *testing.T) { storage := promqltest.LoadedStorage(t, data) opts := NewTestEngineOpts() - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) ctx := context.Background() diff --git a/pkg/streamingpromql/query.go b/pkg/streamingpromql/query.go index 01c707ea0ac..4a3bb74c83d 100644 --- a/pkg/streamingpromql/query.go +++ b/pkg/streamingpromql/query.go @@ -74,6 +74,11 @@ func (e *Engine) newQuery(ctx context.Context, queryable storage.Queryable, opts return nil, fmt.Errorf("could not get memory consumption limit for query: %w", err) } + validationScheme, err := e.limitsProvider.GetValidationScheme(ctx) + if err != nil { + return nil, fmt.Errorf("could not get validation scheme for query: %w", err) + } + memoryConsumptionTracker := limiter.NewMemoryConsumptionTracker(ctx, maxEstimatedMemoryConsumptionPerQuery, e.queriesRejectedDueToPeakMemoryConsumption, originalExpression) stats, err := types.NewQueryStats(timeRange, e.enablePerStepStats && opts.EnablePerStepStats(), memoryConsumptionTracker) if err != nil { @@ -88,7 +93,7 
@@ func (e *Engine) newQuery(ctx context.Context, queryable storage.Queryable, opts topLevelQueryTimeRange: timeRange, lookbackDelta: lookbackDelta, originalExpression: originalExpression, - nameValidationScheme: opts.ValidationScheme(), + nameValidationScheme: validationScheme, } return q, nil From a37b3be608a50c87c013922daf83cd4db6829101 Mon Sep 17 00:00:00 2001 From: Julius Hinze Date: Tue, 22 Jul 2025 10:09:05 +0200 Subject: [PATCH 05/10] deps: update alertmanager; prometheus-mimir --- go.mod | 12 +- go.sum | 28 +- .../alertmanager/config/notifiers.go | 2 +- .../alertmanager/notify/webhook/webhook.go | 3 +- .../prometheus/common/promslog/slog.go | 14 + .../prometheus/prometheus/config/config.go | 69 ++- .../prometheus/discovery/manager.go | 22 +- .../{labels.go => labels_slicelabels.go} | 0 .../prometheus/model/relabel/relabel.go | 13 +- .../prometheus/model/rulefmt/rulefmt.go | 23 +- .../prometheus/prometheus/notifier/manager.go | 32 +- .../prometheus/prometheus/promql/durations.go | 14 +- .../prometheus/prometheus/promql/engine.go | 105 ++--- .../prometheus/prometheus/promql/functions.go | 419 +++++++++--------- .../promql/parser/generated_parser.y | 6 +- .../promql/parser/generated_parser.y.go | 6 +- .../prometheus/promql/parser/parse.go | 11 - .../promqltest/testdata/aggregators.test | 152 +++++-- .../promqltest/testdata/at_modifier.test | 3 +- .../promql/promqltest/testdata/collision.test | 3 +- .../promql/promqltest/testdata/functions.test | 189 ++++++-- .../promqltest/testdata/histograms.test | 121 ++++- .../promql/promqltest/testdata/limit.test | 8 +- .../testdata/name_label_dropping.test | 3 +- .../testdata/native_histograms.test | 225 +++++++--- .../promql/promqltest/testdata/operators.test | 231 +++++++--- .../promql/promqltest/testdata/subquery.test | 2 + .../prometheus/prometheus/promql/value.go | 8 +- .../prometheus/prometheus/rules/manager.go | 4 +- .../prometheus/prometheus/scrape/manager.go | 5 +- .../prometheus/prometheus/scrape/scrape.go | 39 
+- .../prometheusremotewrite/helper.go | 11 +- .../prometheusremotewrite/metrics_to_prw.go | 4 +- .../storage/remote/write_handler.go | 7 +- .../prometheus/template/template.go | 13 +- .../tsdb/chunkenc/float_histogram.go | 134 +++--- .../prometheus/tsdb/chunkenc/histogram.go | 134 +++--- .../tsdb/chunkenc/histogram_meta.go | 163 ++----- .../prometheus/prometheus/tsdb/compact.go | 8 - .../util/annotations/annotations.go | 2 +- .../prometheus/prometheus/web/api/v1/api.go | 3 +- .../google.golang.org/api/internal/version.go | 2 +- vendor/modules.txt | 12 +- 43 files changed, 1358 insertions(+), 907 deletions(-) rename vendor/github.com/prometheus/prometheus/model/labels/{labels.go => labels_slicelabels.go} (100%) diff --git a/go.mod b/go.mod index d1626baa431..5162423cf61 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,7 @@ require ( github.com/prometheus/alertmanager v0.28.1 github.com/prometheus/client_golang v1.23.0-rc.1 github.com/prometheus/client_model v0.6.2 - github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3 + github.com/prometheus/common v0.65.1-0.20250711183725-0e1982f10d4c github.com/prometheus/prometheus v1.99.0 github.com/segmentio/fasthash v1.0.3 github.com/sirupsen/logrus v1.9.3 @@ -93,7 +93,7 @@ require ( go.opentelemetry.io/proto/otlp v1.6.0 go.uber.org/multierr v1.11.0 golang.org/x/term v0.32.0 - google.golang.org/api v0.238.0 + google.golang.org/api v0.239.0 google.golang.org/protobuf v1.36.6 sigs.k8s.io/kustomize/kyaml v0.18.1 ) @@ -343,10 +343,7 @@ require ( sigs.k8s.io/yaml v1.4.0 // indirect ) -replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v1.8.2-0.20250717103207-acaa45ca4d40 - -// https://github.com/grafana/prometheus-alertmanager/pull/118 -replace github.com/prometheus/alertmanager => github.com/juliusmh/alertmanager v0.26.1-0.20250624114102-96969065f8d2 +replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v1.8.2-0.20250725123259-c4bd4faba234 // Replace memberlist 
with our fork which includes some fixes that haven't been // merged upstream yet: @@ -372,6 +369,9 @@ replace github.com/opentracing-contrib/go-stdlib => github.com/grafana/opentraci // Replace opentracing-contrib/go-grpc with a fork until https://github.com/opentracing-contrib/go-grpc/pull/16 is merged. replace github.com/opentracing-contrib/go-grpc => github.com/charleskorn/go-grpc v0.0.0-20231024023642-e9298576254f +// Replacing prometheus/alertmanager with our fork. +replace github.com/prometheus/alertmanager => github.com/grafana/prometheus-alertmanager v0.25.1-0.20250722103749-329f0c4df1ba + // Use Mimir fork of prometheus/otlptranslator to allow for higher velocity of upstream development, // while allowing Mimir to move at a more conservative pace. replace github.com/prometheus/otlptranslator => github.com/grafana/mimir-otlptranslator v0.0.0-20250703083430-c31a9568ad96 diff --git a/go.sum b/go.sum index 58a36810ce2..da525c9b889 100644 --- a/go.sum +++ b/go.sum @@ -273,14 +273,14 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczC github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/digitalocean/godo v1.152.0 h1:WRgkPMogZSXEJK70IkZKTB/PsMn16hMQ+NI3wCIQdzA= -github.com/digitalocean/godo v1.152.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM= +github.com/digitalocean/godo v1.157.0 h1:ReELaS6FxXNf8gryUiVH0wmyUmZN8/NCmBX4gXd3F0o= +github.com/digitalocean/godo v1.157.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= 
github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ= github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= -github.com/docker/docker v28.2.2+incompatible h1:CjwRSksz8Yo4+RmQ339Dp/D2tGO5JxwYeqtMOEe0LDw= -github.com/docker/docker v28.2.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v28.3.0+incompatible h1:ffS62aKWupCWdvcee7nBU9fhnmknOqDPaJAMtfK0ImQ= +github.com/docker/docker v28.3.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 h1:IPrmumsT9t5BS7XcPhgsCTlkWbYg80SEXUzDpReaU6Y= github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -571,12 +571,14 @@ github.com/grafana/memberlist v0.3.1-0.20250428154222-f7d51a6f6700 h1:0t7iOQ5ZkB github.com/grafana/memberlist v0.3.1-0.20250428154222-f7d51a6f6700/go.mod h1:Ri9p/tRShbjYnpNf4FFPXG7wxEGY4Nrcn6E7jrVa//4= github.com/grafana/mimir-otlptranslator v0.0.0-20250703083430-c31a9568ad96 h1:kq5zJVW9LyFOB5xCeQPTON2HNjwwEkefhegZXGIhQPk= github.com/grafana/mimir-otlptranslator v0.0.0-20250703083430-c31a9568ad96/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI= -github.com/grafana/mimir-prometheus v1.8.2-0.20250717103207-acaa45ca4d40 h1:OJyH1LqzHc1tHOxRQsu2pHxejVgErhh6r472wwyun6A= -github.com/grafana/mimir-prometheus v1.8.2-0.20250717103207-acaa45ca4d40/go.mod h1:MulFQg8pjFVYGJZACg+4he5Z6BnrlTYLCMfTDKp8uGc= +github.com/grafana/mimir-prometheus v1.8.2-0.20250725123259-c4bd4faba234 h1:hwME5D5GMogJkN9yobyKTEvOk/SzUFDnYNkUjLsg3ik= +github.com/grafana/mimir-prometheus v1.8.2-0.20250725123259-c4bd4faba234/go.mod h1:Pe/2vVv91zryCeOwLSjIFJFsw4Pvd2VNHbTUGu6kUls= github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956 
h1:em1oddjXL8c1tL0iFdtVtPloq2hRPen2MJQKoAWpxu0= github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/grafana/otel-profiling-go v0.5.1 h1:stVPKAFZSa7eGiqbYuG25VcqYksR6iWvF3YH66t4qL8= github.com/grafana/otel-profiling-go v0.5.1/go.mod h1:ftN/t5A/4gQI19/8MoWurBEtC6gFw8Dns1sJZ9W4Tls= +github.com/grafana/prometheus-alertmanager v0.25.1-0.20250722103749-329f0c4df1ba h1:8u5N0btFygn+2S+B6Xs0HFfq4NJ0kJsX9UpIOlidDmQ= +github.com/grafana/prometheus-alertmanager v0.25.1-0.20250722103749-329f0c4df1ba/go.mod h1:O/QP1BCm0HHIzbKvgMzqb5sSyH88rzkFk84F4TfJjBU= github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKtUuKQbJqgAIjlnicKg= github.com/grafana/pyroscope-go/godeltaprof v0.1.8/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= github.com/grafana/regexp v0.0.0-20240531075221-3685f1377d7b h1:oMAq12GxTpwo9jxbnG/M4F/HdpwbibTaVoxNA0NZprY= @@ -711,8 +713,6 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/juliusmh/alertmanager v0.26.1-0.20250624114102-96969065f8d2 h1:J2VAb425QgLA/NEI/GK/jksDaiTqHQQPEK7mZ+LEQNI= -github.com/juliusmh/alertmanager v0.26.1-0.20250624114102-96969065f8d2/go.mod h1:O/QP1BCm0HHIzbKvgMzqb5sSyH88rzkFk84F4TfJjBU= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU= github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k= @@ -749,8 +749,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/linode/linodego v1.52.1 h1:HJ1cz1n9n3chRP9UrtqmP91+xTi0Q5l+H/4z4tpkwgQ= -github.com/linode/linodego v1.52.1/go.mod h1:zEN2sX+cSdp67EuRY1HJiyuLujoa7HqvVwNEcJv3iXw= +github.com/linode/linodego v1.52.2 h1:N9ozU27To1LMSrDd8WvJZ5STSz1eGYdyLnxhAR/dIZg= +github.com/linode/linodego v1.52.2/go.mod h1:bI949fZaVchjWyKIA08hNyvAcV6BAS+PM2op3p7PAWA= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= @@ -906,8 +906,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3 h1:R/zO7ombSHCI8bjQusgCMSL+cE669w5/R2upq5WlPD0= -github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/common v0.65.1-0.20250711183725-0e1982f10d4c h1:LfxkKdkGF+3fC5ZiHv5sWtEMH+STn+Edwx78s+W95QU= +github.com/prometheus/common v0.65.1-0.20250711183725-0e1982f10d4c/go.mod h1:LL3lcZII3UXGO4InbF+BTSsiAAPUBnwFVbp4gBWIMqw= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod 
h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg= @@ -1589,8 +1589,8 @@ google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRR google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko= -google.golang.org/api v0.238.0 h1:+EldkglWIg/pWjkq97sd+XxH7PxakNYoe/rkSTbnvOs= -google.golang.org/api v0.238.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50= +google.golang.org/api v0.239.0 h1:2hZKUnFZEy81eugPs4e2XzIJ5SOwQg0G82bpXD65Puo= +google.golang.org/api v0.239.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= diff --git a/vendor/github.com/prometheus/alertmanager/config/notifiers.go b/vendor/github.com/prometheus/alertmanager/config/notifiers.go index 97ce8692da6..16b7a33f308 100644 --- a/vendor/github.com/prometheus/alertmanager/config/notifiers.go +++ b/vendor/github.com/prometheus/alertmanager/config/notifiers.go @@ -514,7 +514,7 @@ type WebhookConfig struct { // Timeout is the maximum time allowed to invoke the webhook. Setting this to 0 // does not impose a timeout. - Timeout time.Duration `yaml:"timeout" json:"timeout"` + Timeout model.Duration `yaml:"timeout" json:"timeout"` } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
diff --git a/vendor/github.com/prometheus/alertmanager/notify/webhook/webhook.go b/vendor/github.com/prometheus/alertmanager/notify/webhook/webhook.go index eb4e01ba40d..153c17f565d 100644 --- a/vendor/github.com/prometheus/alertmanager/notify/webhook/webhook.go +++ b/vendor/github.com/prometheus/alertmanager/notify/webhook/webhook.go @@ -21,6 +21,7 @@ import ( "net/http" "os" "strings" + "time" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -125,7 +126,7 @@ func (n *Notifier) Notify(ctx context.Context, alerts ...*types.Alert) (bool, er } if n.conf.Timeout > 0 { - postCtx, cancel := context.WithTimeoutCause(ctx, n.conf.Timeout, fmt.Errorf("configured webhook timeout reached (%s)", n.conf.Timeout)) + postCtx, cancel := context.WithTimeoutCause(ctx, time.Duration(n.conf.Timeout), fmt.Errorf("configured webhook timeout reached (%s)", n.conf.Timeout)) defer cancel() ctx = postCtx } diff --git a/vendor/github.com/prometheus/common/promslog/slog.go b/vendor/github.com/prometheus/common/promslog/slog.go index 8da43aef527..02370f17561 100644 --- a/vendor/github.com/prometheus/common/promslog/slog.go +++ b/vendor/github.com/prometheus/common/promslog/slog.go @@ -197,6 +197,13 @@ func newGoKitStyleReplaceAttrFunc(lvl *Level) func(groups []string, a slog.Attr) } default: } + + // Ensure time.Duration values are _always_ formatted as a Go + // duration string (ie, "1d2h3m"). + if v, ok := a.Value.Any().(time.Duration); ok { + a.Value = slog.StringValue(v.String()) + } + return a } } @@ -238,6 +245,13 @@ func defaultReplaceAttr(_ []string, a slog.Attr) slog.Attr { } default: } + + // Ensure time.Duration values are _always_ formatted as a Go duration + // string (ie, "1d2h3m"). 
+ if v, ok := a.Value.Any().(time.Duration); ok { + a.Value = slog.StringValue(v.String()) + } + return a } diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index 12ca828ae8f..7099ba325ab 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -104,7 +104,7 @@ func Load(s string, logger *slog.Logger) (*Config, error) { } switch cfg.OTLPConfig.TranslationStrategy { - case UnderscoreEscapingWithSuffixes: + case UnderscoreEscapingWithSuffixes, UnderscoreEscapingWithoutSuffixes: case "": case NoTranslation, NoUTF8EscapingWithSuffixes: if cfg.GlobalConfig.MetricNameValidationScheme == model.LegacyValidation { @@ -1534,31 +1534,68 @@ func getGoGC() int { type translationStrategyOption string var ( - // NoUTF8EscapingWithSuffixes will accept metric/label names as they are. - // Unit and type suffixes may be added to metric names, according to certain rules. + // NoUTF8EscapingWithSuffixes will accept metric/label names as they are. Unit + // and type suffixes may be added to metric names, according to certain rules. NoUTF8EscapingWithSuffixes translationStrategyOption = "NoUTF8EscapingWithSuffixes" - // UnderscoreEscapingWithSuffixes is the default option for translating OTLP to Prometheus. - // This option will translate metric name characters that are not alphanumerics/underscores/colons to underscores, - // and label name characters that are not alphanumerics/underscores to underscores. - // Unit and type suffixes may be appended to metric names, according to certain rules. + // UnderscoreEscapingWithSuffixes is the default option for translating OTLP + // to Prometheus. This option will translate metric name characters that are + // not alphanumerics/underscores/colons to underscores, and label name + // characters that are not alphanumerics/underscores to underscores. 
Unit and + // type suffixes may be appended to metric names, according to certain rules. UnderscoreEscapingWithSuffixes translationStrategyOption = "UnderscoreEscapingWithSuffixes" + // UnderscoreEscapingWithoutSuffixes translates metric name characters that + // are not alphanumerics/underscores/colons to underscores, and label name + // characters that are not alphanumerics/underscores to underscores, but + // unlike UnderscoreEscapingWithSuffixes it does not append any suffixes to + // the names. + UnderscoreEscapingWithoutSuffixes translationStrategyOption = "UnderscoreEscapingWithoutSuffixes" // NoTranslation (EXPERIMENTAL): disables all translation of incoming metric - // and label names. This offers a way for the OTLP users to use native metric names, reducing confusion. + // and label names. This offers a way for the OTLP users to use native metric + // names, reducing confusion. // // WARNING: This setting has significant known risks and limitations (see - // https://prometheus.io/docs/practices/naming/ for details): - // * Impaired UX when using PromQL in plain YAML (e.g. alerts, rules, dashboard, autoscaling configuration). - // * Series collisions which in the best case may result in OOO errors, in the worst case a silently malformed - // time series. For instance, you may end up in situation of ingesting `foo.bar` series with unit - // `seconds` and a separate series `foo.bar` with unit `milliseconds`. + // https://prometheus.io/docs/practices/naming/ for details): * Impaired UX + // when using PromQL in plain YAML (e.g. alerts, rules, dashboard, autoscaling + // configuration). * Series collisions which in the best case may result in + // OOO errors, in the worst case a silently malformed time series. For + // instance, you may end up in situation of ingesting `foo.bar` series with + // unit `seconds` and a separate series `foo.bar` with unit `milliseconds`. 
// - // As a result, this setting is experimental and currently, should not be used in - // production systems. + // As a result, this setting is experimental and currently, should not be used + // in production systems. // - // TODO(ArthurSens): Mention `type-and-unit-labels` feature (https://github.com/prometheus/proposals/pull/39) once released, as potential mitigation of the above risks. + // TODO(ArthurSens): Mention `type-and-unit-labels` feature + // (https://github.com/prometheus/proposals/pull/39) once released, as + // potential mitigation of the above risks. NoTranslation translationStrategyOption = "NoTranslation" ) +// ShouldEscape returns true if the translation strategy requires that metric +// names be escaped. +func (o translationStrategyOption) ShouldEscape() bool { + switch o { + case UnderscoreEscapingWithSuffixes, UnderscoreEscapingWithoutSuffixes: + return true + case NoTranslation, NoUTF8EscapingWithSuffixes: + return false + default: + return false + } +} + +// ShouldAddSuffixes returns a bool deciding whether the given translation +// strategy should have suffixes added. +func (o translationStrategyOption) ShouldAddSuffixes() bool { + switch o { + case UnderscoreEscapingWithSuffixes, NoUTF8EscapingWithSuffixes: + return true + case UnderscoreEscapingWithoutSuffixes, NoTranslation: + return false + default: + return false + } +} + // OTLPConfig is the configuration for writing to the OTLP endpoint. 
type OTLPConfig struct { PromoteAllResourceAttributes bool `yaml:"promote_all_resource_attributes,omitempty"` diff --git a/vendor/github.com/prometheus/prometheus/discovery/manager.go b/vendor/github.com/prometheus/prometheus/discovery/manager.go index 24950d9d59b..51a46ca2317 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/manager.go +++ b/vendor/github.com/prometheus/prometheus/discovery/manager.go @@ -365,8 +365,10 @@ func (m *Manager) updater(ctx context.Context, p *Provider, updates chan []*targ func (m *Manager) sender() { ticker := time.NewTicker(m.updatert) - defer ticker.Stop() - + defer func() { + ticker.Stop() + close(m.syncCh) + }() for { select { case <-m.ctx.Done(): @@ -508,19 +510,3 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int { } return failed } - -// StaticProvider holds a list of target groups that never change. -type StaticProvider struct { - TargetGroups []*targetgroup.Group -} - -// Run implements the Worker interface. -func (sd *StaticProvider) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { - // We still have to consider that the consumer exits right away in which case - // the context will be canceled. 
- select { - case ch <- sd.TargetGroups: - case <-ctx.Done(): - } - close(ch) -} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_slicelabels.go similarity index 100% rename from vendor/github.com/prometheus/prometheus/model/labels/labels.go rename to vendor/github.com/prometheus/prometheus/model/labels/labels_slicelabels.go diff --git a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go index 6d7077d0b77..72b73de6340 100644 --- a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go +++ b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go @@ -114,8 +114,13 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { if c.Regex.Regexp == nil { c.Regex = MustNewRegexp("") } - if c.MetricNameValidationScheme == model.UnsetValidation { - c.MetricNameValidationScheme = model.UTF8Validation + switch c.MetricNameValidationScheme { + case model.LegacyValidation, model.UTF8Validation: + case model.UnsetValidation: + //nolint:staticcheck // model.NameValidationScheme is deprecated. 
+ c.MetricNameValidationScheme = model.NameValidationScheme + default: + return fmt.Errorf("unknown global name validation method specified, must be either '', 'legacy' or 'utf8', got %s", c.MetricNameValidationScheme) } return c.Validate() } @@ -131,7 +136,9 @@ func (c *Config) Validate() error { return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action) } - if c.MetricNameValidationScheme == model.UnsetValidation { + switch c.MetricNameValidationScheme { + case model.LegacyValidation, model.UTF8Validation: + default: return errors.New("MetricNameValidationScheme must be set in relabel configuration") } diff --git a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go index 46cae2d608f..4e9d78abd0b 100644 --- a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go +++ b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go @@ -340,7 +340,8 @@ func testTemplateParsing(rl *Rule) (errs []error) { } type parseArgs struct { - validationScheme model.ValidationScheme + validationScheme model.ValidationScheme + ignoreUnknownFields bool } type ParseOption func(*parseArgs) @@ -352,11 +353,19 @@ func WithValidationScheme(scheme model.ValidationScheme) ParseOption { } } +// WithIgnoreUnknownFields returns a ParseOption setting whether to ignore unknown fields. +func WithIgnoreUnknownFields(ignoreUnknownFields bool) ParseOption { + return func(args *parseArgs) { + args.ignoreUnknownFields = ignoreUnknownFields + } +} + // Parse parses and validates a set of rules. -// The default metric/label name validation scheme is model.UTF8Validation. -func Parse(content []byte, ignoreUnknownFields bool, opts ...ParseOption) (*RuleGroups, []error) { +// The default metric/label name validation scheme is model.NameValidationScheme. 
+func Parse(content []byte, opts ...ParseOption) (*RuleGroups, []error) { args := &parseArgs{ - validationScheme: model.UTF8Validation, + //nolint:staticcheck // model.NameValidationScheme is deprecated. + validationScheme: model.NameValidationScheme, } for _, opt := range opts { opt(args) @@ -369,7 +378,7 @@ func Parse(content []byte, ignoreUnknownFields bool, opts ...ParseOption) (*Rule ) decoder := yaml.NewDecoder(bytes.NewReader(content)) - if !ignoreUnknownFields { + if !args.ignoreUnknownFields { decoder.KnownFields(true) } err := decoder.Decode(&groups) @@ -390,12 +399,12 @@ func Parse(content []byte, ignoreUnknownFields bool, opts ...ParseOption) (*Rule } // ParseFile reads and parses rules from a file. -func ParseFile(file string, ignoreUnknownFields bool, opts ...ParseOption) (*RuleGroups, []error) { +func ParseFile(file string, opts ...ParseOption) (*RuleGroups, []error) { b, err := os.ReadFile(file) if err != nil { return nil, []error{fmt.Errorf("%s: %w", file, err)} } - rgs, errs := Parse(b, ignoreUnknownFields, opts...) + rgs, errs := Parse(b, opts...) for i := range errs { errs[i] = fmt.Errorf("%s: %w", file, errs[i]) } diff --git a/vendor/github.com/prometheus/prometheus/notifier/manager.go b/vendor/github.com/prometheus/prometheus/notifier/manager.go index 043bbf4d108..a7fe43eb774 100644 --- a/vendor/github.com/prometheus/prometheus/notifier/manager.go +++ b/vendor/github.com/prometheus/prometheus/notifier/manager.go @@ -105,6 +105,20 @@ func NewManager(o *Options, logger *slog.Logger) *Manager { logger = promslog.NewNopLogger() } + for i, rc := range o.RelabelConfigs { + switch rc.MetricNameValidationScheme { + case model.LegacyValidation, model.UTF8Validation: + default: + //nolint:staticcheck // model.NameValidationScheme is deprecated. 
+ o.RelabelConfigs[i].MetricNameValidationScheme = model.NameValidationScheme + logger.Warn( + "notifier.NewManager: using default metric/label name validation scheme", + "relabel_config", i, + "scheme", o.RelabelConfigs[i].MetricNameValidationScheme, + ) + } + } + n := &Manager{ queue: make([]*Alert, 0, o.QueueCapacity), more: make(chan struct{}, 1), @@ -134,6 +148,14 @@ func (n *Manager) ApplyConfig(conf *config.Config) error { n.opts.ExternalLabels = conf.GlobalConfig.ExternalLabels n.opts.RelabelConfigs = conf.AlertingConfig.AlertRelabelConfigs + for i, rc := range n.opts.RelabelConfigs { + switch rc.MetricNameValidationScheme { + case model.LegacyValidation, model.UTF8Validation: + default: + //nolint:staticcheck // model.NameValidationScheme is deprecated. + n.opts.RelabelConfigs[i].MetricNameValidationScheme = model.NameValidationScheme + } + } amSets := make(map[string]*alertmanagerSet) // configToAlertmanagers maps alertmanager sets for each unique AlertmanagerConfig, @@ -255,7 +277,10 @@ func (n *Manager) targetUpdateLoop(tsets <-chan map[string][]*targetgroup.Group) select { case <-n.stopRequested: return - case ts := <-tsets: + case ts, ok := <-tsets: + if !ok { + break + } n.reload(ts) } } @@ -309,11 +334,6 @@ func (n *Manager) Send(alerts ...*Alert) { n.mtx.Lock() defer n.mtx.Unlock() - for i, rc := range n.opts.RelabelConfigs { - if rc.MetricNameValidationScheme == model.UnsetValidation { - n.opts.RelabelConfigs[i].MetricNameValidationScheme = model.UTF8Validation - } - } alerts = relabelAlerts(n.opts.RelabelConfigs, n.opts.ExternalLabels, alerts) if len(alerts) == 0 { return diff --git a/vendor/github.com/prometheus/prometheus/promql/durations.go b/vendor/github.com/prometheus/prometheus/promql/durations.go index 20fa095d531..c882adfbb63 100644 --- a/vendor/github.com/prometheus/prometheus/promql/durations.go +++ b/vendor/github.com/prometheus/prometheus/promql/durations.go @@ -21,11 +21,20 @@ import ( 
"github.com/prometheus/prometheus/promql/parser" ) -// durationVisitor is a visitor that visits a duration expression and calculates the duration. +// durationVisitor is a visitor that calculates the actual value of +// duration expressions in AST nodes. For example the query +// "http_requests_total offset (1h / 2)" is represented in the AST +// as a VectorSelector with OriginalOffset 0 and the duration expression +// in OriginalOffsetExpr representing (1h / 2). This visitor evaluates +// such duration expression, setting OriginalOffset to 30m. type durationVisitor struct { step time.Duration } +// Visit finds any duration expressions in AST Nodes and modifies the Node to +// store the concrete value. Note that parser.Walk does NOT traverse the +// duration expressions such as OriginalOffsetExpr so we make our own recursive +// call on those to evaluate the result. func (v *durationVisitor) Visit(node parser.Node, _ []parser.Node) (parser.Visitor, error) { switch n := node.(type) { case *parser.VectorSelector: @@ -70,7 +79,8 @@ func (v *durationVisitor) Visit(node parser.Node, _ []parser.Node) (parser.Visit return v, nil } -// calculateDuration computes the duration from a duration expression. +// calculateDuration returns the float value of a duration expression as +// time.Duration or an error if the duration is invalid. func (v *durationVisitor) calculateDuration(expr parser.Expr, allowedNegative bool) (time.Duration, error) { duration, err := v.evaluateDurationExpr(expr) if err != nil { diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index f8289111881..3cdf299dffc 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -151,17 +151,14 @@ type PrometheusQueryOpts struct { enablePerStepStats bool // Lookback delta duration for this query. 
lookbackDelta time.Duration - // validationScheme for metric/label names. - validationScheme model.ValidationScheme } var _ QueryOpts = &PrometheusQueryOpts{} -func NewPrometheusQueryOpts(enablePerStepStats bool, lookbackDelta time.Duration, validationScheme model.ValidationScheme) QueryOpts { +func NewPrometheusQueryOpts(enablePerStepStats bool, lookbackDelta time.Duration) QueryOpts { return &PrometheusQueryOpts{ enablePerStepStats: enablePerStepStats, lookbackDelta: lookbackDelta, - validationScheme: validationScheme, } } @@ -173,17 +170,11 @@ func (p *PrometheusQueryOpts) LookbackDelta() time.Duration { return p.lookbackDelta } -func (p *PrometheusQueryOpts) ValidationScheme() model.ValidationScheme { - return p.validationScheme -} - type QueryOpts interface { // Enables recording per-step statistics if the engine has it enabled as well. Disabled by default. EnablePerStepStats() bool // Lookback delta duration for this query. LookbackDelta() time.Duration - // ValidationScheme to use for metric and label names. - ValidationScheme() model.ValidationScheme } // query implements the Query interface. @@ -202,8 +193,6 @@ type query struct { matrix Matrix // Cancellation function for the query. cancel func() - // validationScheme used for metric and label names. - validationScheme model.ValidationScheme // The engine against which the query is executed. 
ng *Engine @@ -531,7 +520,7 @@ func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts Q func (ng *Engine) newQuery(q storage.Queryable, qs string, opts QueryOpts, start, end time.Time, interval time.Duration) (*parser.Expr, *query) { if opts == nil { - opts = NewPrometheusQueryOpts(false, 0, model.UTF8Validation) + opts = NewPrometheusQueryOpts(false, 0) } lookbackDelta := opts.LookbackDelta() @@ -546,13 +535,12 @@ func (ng *Engine) newQuery(q storage.Queryable, qs string, opts QueryOpts, start LookbackDelta: lookbackDelta, } qry := &query{ - q: qs, - stmt: es, - ng: ng, - stats: stats.NewQueryTimers(), - sampleStats: stats.NewQuerySamples(ng.enablePerStepStats && opts.EnablePerStepStats()), - queryable: q, - validationScheme: opts.ValidationScheme(), + q: qs, + stmt: es, + ng: ng, + stats: stats.NewQueryTimers(), + sampleStats: stats.NewQuerySamples(ng.enablePerStepStats && opts.EnablePerStepStats()), + queryable: q, } return &es.Expr, qry } @@ -757,7 +745,6 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval enableDelayedNameRemoval: ng.enableDelayedNameRemoval, enableTypeAndUnitLabels: ng.enableTypeAndUnitLabels, querier: querier, - validationScheme: query.validationScheme, } query.sampleStats.InitStepTracking(start, start, 1) @@ -818,7 +805,6 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval enableDelayedNameRemoval: ng.enableDelayedNameRemoval, enableTypeAndUnitLabels: ng.enableTypeAndUnitLabels, querier: querier, - validationScheme: query.validationScheme, } query.sampleStats.InitStepTracking(evaluator.startTimestamp, evaluator.endTimestamp, evaluator.interval) val, warnings, err := evaluator.Eval(ctxInnerEval, s.Expr) @@ -1093,7 +1079,6 @@ type evaluator struct { enableDelayedNameRemoval bool enableTypeAndUnitLabels bool querier storage.Querier - validationScheme model.ValidationScheme } // errorf causes a panic with the input formatted into an error. 
@@ -1243,7 +1228,7 @@ func (enh *EvalNodeHelper) resetHistograms(inVec Vector, arg parser.Expr) annota // function call results. // The prepSeries function (if provided) can be used to prepare the helper // for each series, then passed to each call funcCall. -func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, annotations.Annotations), exprs ...parser.Expr) (Matrix, annotations.Annotations) { +func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]Vector, Matrix, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, annotations.Annotations), exprs ...parser.Expr) (Matrix, annotations.Annotations) { numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 matrixes := make([]Matrix, len(exprs)) origMatrixes := make([]Matrix, len(exprs)) @@ -1265,8 +1250,7 @@ func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Label } } - vectors := make([]Vector, len(exprs)) // Input vectors for the function. - args := make([]parser.Value, len(exprs)) // Argument to function. + vectors := make([]Vector, len(exprs)) // Input vectors for the function. // Create an output vector that is as big as the input matrix with // the most time series. biggestLen := 1 @@ -1320,7 +1304,6 @@ func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Label sh = seriesHelpers[i] } vectors[i], bh = ev.gatherVector(ts, matrixes[i], vectors[i], bh, sh) - args[i] = vectors[i] if prepSeries != nil { bufHelpers[i] = bh } @@ -1328,7 +1311,7 @@ func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Label // Make the function call. enh.Ts = ts - result, ws := funcCall(args, bufHelpers, enh) + result, ws := funcCall(vectors, nil, bufHelpers, enh) enh.Out = result[:0] // Reuse result vector. 
warnings.Merge(ws) @@ -1693,15 +1676,15 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, if e.Op == parser.COUNT_VALUES { valueLabel := param.(*parser.StringLiteral) - if !labels.IsValidLabelName(valueLabel.Val, ev.validationScheme) { + if !model.LabelName(valueLabel.Val).IsValid() { ev.errorf("invalid label name %s", valueLabel) } if !e.Without { sortedGrouping = append(sortedGrouping, valueLabel.Val) slices.Sort(sortedGrouping) } - return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return ev.aggregationCountValues(e, sortedGrouping, valueLabel.Val, v[0].(Vector), enh) + return ev.rangeEval(ctx, nil, func(v []Vector, _ Matrix, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.aggregationCountValues(e, sortedGrouping, valueLabel.Val, v[0], enh) }, e.Expr) } @@ -1781,22 +1764,18 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, if !matrixArg { // Does not have a matrix argument. - return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec, annos := call(v, e.Args, enh) + return ev.rangeEval(ctx, nil, func(v []Vector, _ Matrix, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec, annos := call(v, nil, e.Args, enh) return vec, warnings.Merge(annos) }, e.Args...) } - inArgs := make([]parser.Value, len(e.Args)) // Evaluate any non-matrix arguments. 
- otherArgs := make([]Matrix, len(e.Args)) - otherInArgs := make([]Vector, len(e.Args)) + evalVals := make([]Matrix, len(e.Args)) for i, e := range e.Args { if i != matrixArgIndex { val, ws := ev.eval(ctx, e) - otherArgs[i] = val.(Matrix) - otherInArgs[i] = Vector{Sample{}} - inArgs[i] = otherInArgs[i] + evalVals[i] = val.(Matrix) warnings.Merge(ws) } } @@ -1824,7 +1803,6 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, var histograms []HPoint var prevSS *Series inMatrix := make(Matrix, 1) - inArgs[matrixArgIndex] = inMatrix enh := &EvalNodeHelper{Out: make(Vector, 0, 1), enableDelayedNameRemoval: ev.enableDelayedNameRemoval} // Process all the calls for one time series at a time. it := storage.NewBuffer(selRange) @@ -1835,7 +1813,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, // vector functions, the only change needed is to drop the // metric name in the output. dropName := e.Func.Name != "last_over_time" - + vectorVals := make([]Vector, len(e.Args)-1) for i, s := range selVS.Series { if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) @@ -1863,9 +1841,11 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, // Set the non-matrix arguments. // They are scalar, so it is safe to use the step number // when looking up the argument, as there will be no gaps. + counter := 0 for j := range e.Args { if j != matrixArgIndex { - otherInArgs[j][0].F = otherArgs[j][0].Floats[step].F + vectorVals[counter] = Vector{Sample{F: evalVals[j][0].Floats[step].F}} + counter++ } } // Evaluate the matrix selector for this series @@ -1882,8 +1862,9 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, inMatrix[0].Floats = floats inMatrix[0].Histograms = histograms enh.Ts = ts + // Make the function call. 
- outVec, annos := call(inArgs, e.Args, enh) + outVec, annos := call(vectorVals, inMatrix, e.Args, enh) warnings.Merge(annos) ev.samplesStats.IncrementSamplesAtStep(step, int64(len(floats)+totalHPointSize(histograms))) @@ -1923,7 +1904,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, if ev.enableTypeAndUnitLabels { // When type-and-unit-labels feature is enabled, check __type__ label typeLabel := inMatrix[0].Metric.Get("__type__") - if typeLabel != string(model.MetricTypeCounter) { + if typeLabel != string(model.MetricTypeCounter) && typeLabel != string(model.MetricTypeHistogram) { warnings.Add(annotations.NewPossibleNonCounterLabelInfo(metricName, typeLabel, e.Args[0].PositionRange())) } } else if !strings.HasSuffix(metricName, "_total") && @@ -2017,8 +1998,8 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, case *parser.BinaryExpr: switch lt, rt := e.LHS.Type(), e.RHS.Type(); { case lt == parser.ValueTypeScalar && rt == parser.ValueTypeScalar: - return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - val := scalarBinop(e.Op, v[0].(Vector)[0].F, v[1].(Vector)[0].F) + return ev.rangeEval(ctx, nil, func(v []Vector, _ Matrix, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + val := scalarBinop(e.Op, v[0][0].F, v[1][0].F) return append(enh.Out, Sample{F: val}), nil }, e.LHS, e.RHS) case lt == parser.ValueTypeVector && rt == parser.ValueTypeVector: @@ -2030,40 +2011,40 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, } switch e.Op { case parser.LAND: - return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil + return ev.rangeEval(ctx, initSignatures, func(v []Vector, _ Matrix, 
sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.VectorAnd(v[0], v[1], e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) case parser.LOR: - return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil + return ev.rangeEval(ctx, initSignatures, func(v []Vector, _ Matrix, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.VectorOr(v[0], v[1], e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) case parser.LUNLESS: - return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil + return ev.rangeEval(ctx, initSignatures, func(v []Vector, _ Matrix, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.VectorUnless(v[0], v[1], e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) default: - return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec, err := ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh, e.PositionRange()) + return ev.rangeEval(ctx, initSignatures, func(v []Vector, _ Matrix, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec, err := ev.VectorBinop(e.Op, v[0], v[1], e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh, e.PositionRange()) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar: - return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, 
annotations.Annotations) { - vec, err := ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh, e.PositionRange()) + return ev.rangeEval(ctx, nil, func(v []Vector, _ Matrix, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec, err := ev.VectorscalarBinop(e.Op, v[0], Scalar{V: v[1][0].F}, false, e.ReturnBool, enh, e.PositionRange()) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector: - return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec, err := ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh, e.PositionRange()) + return ev.rangeEval(ctx, nil, func(v []Vector, _ Matrix, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec, err := ev.VectorscalarBinop(e.Op, v[1], Scalar{V: v[0][0].F}, true, e.ReturnBool, enh, e.PositionRange()) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } case *parser.NumberLiteral: span.SetAttributes(attribute.Float64("value", e.Val)) - return ev.rangeEval(ctx, nil, func(_ []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(_ []Vector, _ Matrix, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{F: e.Val, Metric: labels.EmptyLabels()}), nil }) @@ -2099,7 +2080,6 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, enableDelayedNameRemoval: ev.enableDelayedNameRemoval, enableTypeAndUnitLabels: ev.enableTypeAndUnitLabels, querier: ev.querier, - validationScheme: ev.validationScheme, } if e.Step != 0 { @@ -2146,7 +2126,6 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, enableDelayedNameRemoval: 
ev.enableDelayedNameRemoval, enableTypeAndUnitLabels: ev.enableTypeAndUnitLabels, querier: ev.querier, - validationScheme: ev.validationScheme, } res, ws := newEv.eval(ctx, e.Expr) ev.currentSamples = newEv.currentSamples @@ -2236,7 +2215,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Co seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)-1) } - return ev.rangeEval(ctx, nil, func(_ []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(_ []Vector, _ Matrix, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if vs.Timestamp != nil { // This is a special case for "timestamp()" when the @ modifier is used, to ensure that // we return a point for each time step in this case. @@ -2265,7 +2244,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Co } } ev.samplesStats.UpdatePeak(ev.currentSamples) - vec, annos := call([]parser.Value{vec}, e.Args, enh) + vec, annos := call([]Vector{vec}, nil, e.Args, enh) return vec, ws.Merge(annos) }) } diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index d22329139dc..d9839c5a054 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -56,10 +56,10 @@ import ( // metrics, the timestamp are not needed. // // Scalar results should be returned as the value of a sample in a Vector. 
-type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) +type FunctionCall func(vectorVals []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) // === time() float64 === -func funcTime(_ []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcTime(_ []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return Vector{Sample{ F: float64(enh.Ts) / 1000, }}, nil @@ -69,11 +69,11 @@ func funcTime(_ []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vect // It calculates the rate (allowing for counter resets if isCounter is true), // extrapolates if the first/last sample is close to the boundary, and returns // the result as either per-second (if isRate is true) or overall. -func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) (Vector, annotations.Annotations) { +func extrapolatedRate(vals Matrix, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) (Vector, annotations.Annotations) { ms := args[0].(*parser.MatrixSelector) vs := ms.VectorSelector.(*parser.VectorSelector) var ( - samples = vals[0].(Matrix)[0] + samples = vals[0] rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset) rangeEnd = enh.Ts - durationMilliseconds(vs.Offset) resultFloat float64 @@ -144,32 +144,37 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod // (which is our guess for where the series actually starts or ends). extrapolationThreshold := averageDurationBetweenSamples * 1.1 - extrapolateToInterval := sampledInterval - if durationToStart >= extrapolationThreshold { durationToStart = averageDurationBetweenSamples / 2 } - if isCounter && resultFloat > 0 && len(samples.Floats) > 0 && samples.Floats[0].F >= 0 { + if isCounter { // Counters cannot be negative. 
If we have any slope at all // (i.e. resultFloat went up), we can extrapolate the zero point // of the counter. If the duration to the zero point is shorter // than the durationToStart, we take the zero point as the start // of the series, thereby avoiding extrapolation to negative // counter values. - // TODO(beorn7): Do this for histograms, too. - durationToZero := sampledInterval * (samples.Floats[0].F / resultFloat) + durationToZero := durationToStart + if resultFloat > 0 && + len(samples.Floats) > 0 && + samples.Floats[0].F >= 0 { + durationToZero = sampledInterval * (samples.Floats[0].F / resultFloat) + } else if resultHistogram != nil && + resultHistogram.Count > 0 && + len(samples.Histograms) > 0 && + samples.Histograms[0].H.Count >= 0 { + durationToZero = sampledInterval * (samples.Histograms[0].H.Count / resultHistogram.Count) + } if durationToZero < durationToStart { durationToStart = durationToZero } } - extrapolateToInterval += durationToStart if durationToEnd >= extrapolationThreshold { durationToEnd = averageDurationBetweenSamples / 2 } - extrapolateToInterval += durationToEnd - factor := extrapolateToInterval / sampledInterval + factor := (sampledInterval + durationToStart + durationToEnd) / sampledInterval if isRate { factor /= ms.Range.Seconds() } @@ -283,33 +288,33 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra } // === delta(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcDelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return extrapolatedRate(vals, args, enh, false, false) +func funcDelta(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return extrapolatedRate(matrixVals, args, enh, false, false) } // === rate(node parser.ValueTypeMatrix) (Vector, Annotations) === -func funcRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, 
annotations.Annotations) { - return extrapolatedRate(vals, args, enh, true, true) +func funcRate(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return extrapolatedRate(matrixVals, args, enh, true, true) } // === increase(node parser.ValueTypeMatrix) (Vector, Annotations) === -func funcIncrease(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return extrapolatedRate(vals, args, enh, true, false) +func funcIncrease(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return extrapolatedRate(matrixVals, args, enh, true, false) } // === irate(node parser.ValueTypeMatrix) (Vector, Annotations) === -func funcIrate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return instantValue(vals, args, enh.Out, true) +func funcIrate(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return instantValue(matrixVals, args, enh.Out, true) } // === idelta(node model.ValMatrix) (Vector, Annotations) === -func funcIdelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return instantValue(vals, args, enh.Out, false) +func funcIdelta(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return instantValue(matrixVals, args, enh.Out, false) } -func instantValue(vals []parser.Value, args parser.Expressions, out Vector, isRate bool) (Vector, annotations.Annotations) { +func instantValue(vals Matrix, args parser.Expressions, out Vector, isRate bool) (Vector, annotations.Annotations) { var ( - samples = vals[0].(Matrix)[0] + samples = vals[0] metricName = samples.Metric.Get(labels.MetricName) ss = make([]Sample, 0, 2) annos annotations.Annotations @@ -436,14 +441,14 @@ func 
calcTrendValue(i int, tf, s0, s1, b float64) float64 { // affects how trends in historical data will affect the current data. A higher // trend factor increases the influence. of trends. Algorithm taken from // https://en.wikipedia.org/wiki/Exponential_smoothing . -func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - samples := vals[0].(Matrix)[0] +func funcDoubleExponentialSmoothing(vectorVals []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + samples := matrixVal[0] metricName := samples.Metric.Get(labels.MetricName) // The smoothing factor argument. - sf := vals[1].(Vector)[0].F + sf := vectorVals[0][0].F // The trend factor argument. - tf := vals[2].(Vector)[0].F + tf := vectorVals[1][0].F // Check that the input parameters are valid. if sf <= 0 || sf >= 1 { @@ -499,27 +504,27 @@ func filterFloats(v Vector) Vector { } // === sort(node parser.ValueTypeVector) (Vector, Annotations) === -func funcSort(vals []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSort(vectorVals []Vector, _ Matrix, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { // NaN should sort to the bottom, so take descending sort with NaN first and // reverse it. 
- byValueSorter := vectorByReverseValueHeap(filterFloats(vals[0].(Vector))) + byValueSorter := vectorByReverseValueHeap(filterFloats(vectorVals[0])) sort.Sort(sort.Reverse(byValueSorter)) return Vector(byValueSorter), nil } // === sortDesc(node parser.ValueTypeVector) (Vector, Annotations) === -func funcSortDesc(vals []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSortDesc(vectorVals []Vector, _ Matrix, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { // NaN should sort to the bottom, so take ascending sort with NaN first and // reverse it. - byValueSorter := vectorByValueHeap(filterFloats(vals[0].(Vector))) + byValueSorter := vectorByValueHeap(filterFloats(vectorVals[0])) sort.Sort(sort.Reverse(byValueSorter)) return Vector(byValueSorter), nil } // === sort_by_label(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) === -func funcSortByLabel(vals []parser.Value, args parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSortByLabel(vectorVals []Vector, _ Matrix, args parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { lbls := stringSliceFromArgs(args[1:]) - slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { + slices.SortFunc(vectorVals[0], func(a, b Sample) int { for _, label := range lbls { lv1 := a.Metric.Get(label) lv2 := b.Metric.Get(label) @@ -539,13 +544,13 @@ func funcSortByLabel(vals []parser.Value, args parser.Expressions, _ *EvalNodeHe return labels.Compare(a.Metric, b.Metric) }) - return vals[0].(Vector), nil + return vectorVals[0], nil } // === sort_by_label_desc(vector parser.ValueTypeVector, label parser.ValueTypeString...) 
(Vector, Annotations) === -func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSortByLabelDesc(vectorVals []Vector, _ Matrix, args parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { lbls := stringSliceFromArgs(args[1:]) - slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { + slices.SortFunc(vectorVals[0], func(a, b Sample) int { for _, label := range lbls { lv1 := a.Metric.Get(label) lv2 := b.Metric.Get(label) @@ -565,7 +570,7 @@ func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, _ *EvalNo return -labels.Compare(a.Metric, b.Metric) }) - return vals[0].(Vector), nil + return vectorVals[0], nil } func clamp(vec Vector, minVal, maxVal float64, enh *EvalNodeHelper) (Vector, annotations.Annotations) { @@ -590,46 +595,46 @@ func clamp(vec Vector, minVal, maxVal float64, enh *EvalNodeHelper) (Vector, ann } // === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) === -func funcClamp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec := vals[0].(Vector) - minVal := vals[1].(Vector)[0].F - maxVal := vals[2].(Vector)[0].F +func funcClamp(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec := vectorVals[0] + minVal := vectorVals[1][0].F + maxVal := vectorVals[2][0].F return clamp(vec, minVal, maxVal, enh) } // === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) === -func funcClampMax(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec := vals[0].(Vector) - maxVal := vals[1].(Vector)[0].F +func funcClampMax(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec := vectorVals[0] + maxVal := vectorVals[1][0].F return clamp(vec, math.Inf(-1), maxVal, enh) } // === 
clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) === -func funcClampMin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec := vals[0].(Vector) - minVal := vals[1].(Vector)[0].F +func funcClampMin(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec := vectorVals[0] + minVal := vectorVals[1][0].F return clamp(vec, minVal, math.Inf(+1), enh) } // === round(Vector parser.ValueTypeVector, toNearest=1 Scalar) (Vector, Annotations) === -func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcRound(vectorVals []Vector, _ Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { // round returns a number rounded to toNearest. // Ties are solved by rounding up. toNearest := float64(1) if len(args) >= 2 { - toNearest = vals[1].(Vector)[0].F + toNearest = vectorVals[1][0].F } // Invert as it seems to cause fewer floating point accuracy issues. 
toNearestInverse := 1.0 / toNearest - return simpleFloatFunc(vals, enh, func(f float64) float64 { + return simpleFloatFunc(vectorVals, enh, func(f float64) float64 { return math.Floor(f*toNearestInverse+0.5) / toNearestInverse }), nil } // === Scalar(node parser.ValueTypeVector) Scalar === -func funcScalar(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcScalar(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { var ( - v = vals[0].(Vector) + v = vectorVals[0] value float64 found bool ) @@ -651,22 +656,22 @@ func funcScalar(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) return append(enh.Out, Sample{F: value}), nil } -func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) float64) Vector { - el := vals[0].(Matrix)[0] +func aggrOverTime(matrixVal Matrix, enh *EvalNodeHelper, aggrFn func(Series) float64) Vector { + el := matrixVal[0] return append(enh.Out, Sample{F: aggrFn(el)}) } -func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) (*histogram.FloatHistogram, error)) (Vector, error) { - el := vals[0].(Matrix)[0] +func aggrHistOverTime(matrixVal Matrix, enh *EvalNodeHelper, aggrFn func(Series) (*histogram.FloatHistogram, error)) (Vector, error) { + el := matrixVal[0] res, err := aggrFn(el) return append(enh.Out, Sample{H: res}), err } // === avg_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - firstSeries := vals[0].(Matrix)[0] +func funcAvgOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + firstSeries := matrixVal[0] if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 { metricName := firstSeries.Metric.Get(labels.MetricName) return enh.Out, 
annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange())) @@ -695,7 +700,7 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode // the current implementation is accurate enough for practical purposes. if len(firstSeries.Floats) == 0 { // The passed values only contain histograms. - vec, err := aggrHistOverTime(vals, enh, func(s Series) (*histogram.FloatHistogram, error) { + vec, err := aggrHistOverTime(matrixVal, enh, func(s Series) (*histogram.FloatHistogram, error) { mean := s.Histograms[0].H.Copy() for i, h := range s.Histograms[1:] { count := float64(i + 2) @@ -722,7 +727,7 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } return vec, nil } - return aggrOverTime(vals, enh, func(s Series) float64 { + return aggrOverTime(matrixVal, enh, func(s Series) float64 { var ( // Pre-set the 1st sample to start the loop with the 2nd. sum, count = s.Floats[0].F, 1. @@ -756,15 +761,15 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } // === count_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) === -func funcCountOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return aggrOverTime(vals, enh, func(s Series) float64 { +func funcCountOverTime(_ []Vector, matrixVals Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return aggrOverTime(matrixVals, enh, func(s Series) float64 { return float64(len(s.Floats) + len(s.Histograms)) }), nil } // === last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) === -func funcLastOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - el := vals[0].(Matrix)[0] +func funcLastOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + el := matrixVal[0] var f FPoint if 
len(el.Floats) > 0 { @@ -789,8 +794,8 @@ func funcLastOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHe } // === mad_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - samples := vals[0].(Matrix)[0] +func funcMadOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + samples := matrixVal[0] var annos annotations.Annotations if len(samples.Floats) == 0 { return enh.Out, nil @@ -799,7 +804,7 @@ func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode metricName := samples.Metric.Get(labels.MetricName) annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) } - return aggrOverTime(vals, enh, func(s Series) float64 { + return aggrOverTime(matrixVal, enh, func(s Series) float64 { values := make(vectorByValueHeap, 0, len(s.Floats)) for _, f := range s.Floats { values = append(values, Sample{F: f.F}) @@ -814,8 +819,8 @@ func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } // === ts_of_last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) === -func funcTsOfLastOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - el := vals[0].(Matrix)[0] +func funcTsOfLastOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + el := matrixVal[0] var tf int64 if len(el.Floats) > 0 { @@ -834,22 +839,22 @@ func funcTsOfLastOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNo } // === ts_of_max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcTsOfMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return compareOverTime(vals, args, enh, func(cur, 
maxVal float64) bool { +func funcTsOfMaxOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return compareOverTime(matrixVal, args, enh, func(cur, maxVal float64) bool { return (cur >= maxVal) || math.IsNaN(maxVal) }, true) } // === ts_of_min_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcTsOfMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return compareOverTime(vals, args, enh, func(cur, maxVal float64) bool { +func funcTsOfMinOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return compareOverTime(matrixVals, args, enh, func(cur, maxVal float64) bool { return (cur <= maxVal) || math.IsNaN(maxVal) }, true) } // compareOverTime is a helper used by funcMaxOverTime and funcMinOverTime. -func compareOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, compareFn func(float64, float64) bool, returnTimestamp bool) (Vector, annotations.Annotations) { - samples := vals[0].(Matrix)[0] +func compareOverTime(matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper, compareFn func(float64, float64) bool, returnTimestamp bool) (Vector, annotations.Annotations) { + samples := matrixVal[0] var annos annotations.Annotations if len(samples.Floats) == 0 { return enh.Out, nil @@ -858,7 +863,7 @@ func compareOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode metricName := samples.Metric.Get(labels.MetricName) annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) } - return aggrOverTime(vals, enh, func(s Series) float64 { + return aggrOverTime(matrixVal, enh, func(s Series) float64 { maxVal := s.Floats[0].F tsOfMax := s.Floats[0].T for _, f := range s.Floats { @@ -875,29 +880,29 @@ func compareOverTime(vals []parser.Value, args parser.Expressions, enh 
*EvalNode } // === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return compareOverTime(vals, args, enh, func(cur, maxVal float64) bool { +func funcMaxOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return compareOverTime(matrixVals, args, enh, func(cur, maxVal float64) bool { return (cur > maxVal) || math.IsNaN(maxVal) }, false) } // === min_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return compareOverTime(vals, args, enh, func(cur, maxVal float64) bool { +func funcMinOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return compareOverTime(matrixVals, args, enh, func(cur, maxVal float64) bool { return (cur < maxVal) || math.IsNaN(maxVal) }, false) } // === sum_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - firstSeries := vals[0].(Matrix)[0] +func funcSumOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + firstSeries := matrixVal[0] if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 { metricName := firstSeries.Metric.Get(labels.MetricName) return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange())) } if len(firstSeries.Floats) == 0 { // The passed values only contain histograms. 
- vec, err := aggrHistOverTime(vals, enh, func(s Series) (*histogram.FloatHistogram, error) { + vec, err := aggrHistOverTime(matrixVal, enh, func(s Series) (*histogram.FloatHistogram, error) { sum := s.Histograms[0].H.Copy() for _, h := range s.Histograms[1:] { _, err := sum.Add(h.H) @@ -917,7 +922,7 @@ func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } return vec, nil } - return aggrOverTime(vals, enh, func(s Series) float64 { + return aggrOverTime(matrixVal, enh, func(s Series) float64 { var sum, c float64 for _, f := range s.Floats { sum, c = kahanSumInc(f.F, sum, c) @@ -930,9 +935,9 @@ func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } // === quantile_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - q := vals[0].(Vector)[0].F - el := vals[1].(Matrix)[0] +func funcQuantileOverTime(vectorVals []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + q := vectorVals[0][0].F + el := matrixVal[0] if len(el.Floats) == 0 { return enh.Out, nil } @@ -952,8 +957,8 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva return append(enh.Out, Sample{F: quantile(q, values)}), annos } -func varianceOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) { - samples := vals[0].(Matrix)[0] +func varianceOverTime(matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) { + samples := matrixVal[0] var annos annotations.Annotations if len(samples.Floats) == 0 { return enh.Out, nil @@ -962,7 +967,7 @@ func varianceOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNod metricName := 
samples.Metric.Get(labels.MetricName) annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) } - return aggrOverTime(vals, enh, func(s Series) float64 { + return aggrOverTime(matrixVal, enh, func(s Series) float64 { var count float64 var mean, cMean float64 var aux, cAux float64 @@ -981,18 +986,18 @@ func varianceOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNod } // === stddev_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return varianceOverTime(vals, args, enh, math.Sqrt) +func funcStddevOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return varianceOverTime(matrixVals, args, enh, math.Sqrt) } // === stdvar_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return varianceOverTime(vals, args, enh, nil) +func funcStdvarOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return varianceOverTime(matrixVals, args, enh, nil) } // === absent(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - if len(vals[0].(Vector)) > 0 { +func funcAbsent(vectorVals []Vector, _ Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + if len(vectorVals[0]) > 0 { return enh.Out, nil } return append(enh.Out, @@ -1007,19 +1012,19 @@ func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe // This function will return 1 if the matrix has at least one element. 
// Due to engine optimization, this function is only called when this condition is true. // Then, the engine post-processes the results to get the expected output. -func funcAbsentOverTime(_ []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcAbsentOverTime(_ []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{F: 1}), nil } // === present_over_time(Vector parser.ValueTypeMatrix) (Vector, Annotations) === -func funcPresentOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return aggrOverTime(vals, enh, func(_ Series) float64 { +func funcPresentOverTime(_ []Vector, matrixVals Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return aggrOverTime(matrixVals, enh, func(_ Series) float64 { return 1 }), nil } -func simpleFloatFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector { - for _, el := range vals[0].(Vector) { +func simpleFloatFunc(vectorVals []Vector, enh *EvalNodeHelper, f func(float64) float64) Vector { + for _, el := range vectorVals[0] { if el.H == nil { // Process only float samples. 
if !enh.enableDelayedNameRemoval { el.Metric = el.Metric.DropReserved(schema.IsMetadataLabel) @@ -1035,127 +1040,127 @@ func simpleFloatFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) f } // === abs(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAbs(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Abs), nil +func funcAbs(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Abs), nil } // === ceil(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcCeil(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Ceil), nil +func funcCeil(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Ceil), nil } // === floor(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcFloor(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Floor), nil +func funcFloor(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Floor), nil } // === exp(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcExp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Exp), nil +func funcExp(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Exp), nil } // === sqrt(Vector VectorNode) (Vector, Annotations) === -func funcSqrt(vals []parser.Value, _ parser.Expressions, 
enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Sqrt), nil +func funcSqrt(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Sqrt), nil } // === ln(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcLn(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Log), nil +func funcLn(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Log), nil } // === log2(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcLog2(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Log2), nil +func funcLog2(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Log2), nil } // === log10(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcLog10(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Log10), nil +func funcLog10(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Log10), nil } // === sin(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcSin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Sin), nil +func funcSin(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Sin), nil } // === cos(Vector 
parser.ValueTypeVector) (Vector, Annotations) === -func funcCos(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Cos), nil +func funcCos(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Cos), nil } // === tan(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcTan(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Tan), nil +func funcTan(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Tan), nil } // === asin(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAsin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Asin), nil +func funcAsin(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Asin), nil } // === acos(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAcos(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Acos), nil +func funcAcos(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Acos), nil } // === atan(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAtan(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Atan), nil +func funcAtan(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, 
annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Atan), nil } // === sinh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcSinh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Sinh), nil +func funcSinh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Sinh), nil } // === cosh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcCosh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Cosh), nil +func funcCosh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Cosh), nil } // === tanh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcTanh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Tanh), nil +func funcTanh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Tanh), nil } // === asinh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAsinh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Asinh), nil +func funcAsinh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Asinh), nil } // === acosh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAcosh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, 
math.Acosh), nil +func funcAcosh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Acosh), nil } // === atanh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAtanh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Atanh), nil +func funcAtanh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Atanh), nil } // === rad(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcRad(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, func(v float64) float64 { +func funcRad(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, func(v float64) float64 { return v * math.Pi / 180 }), nil } // === deg(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcDeg(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, func(v float64) float64 { +func funcDeg(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, func(v float64) float64 { return v * 180 / math.Pi }), nil } // === pi() Scalar === -func funcPi(_ []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcPi(_ []Vector, _ Matrix, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { return Vector{Sample{F: math.Pi}}, nil } // === sgn(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcSgn(vals []parser.Value, _ parser.Expressions, enh 
*EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, func(v float64) float64 { +func funcSgn(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, func(v float64) float64 { switch { case v < 0: return -1 @@ -1168,8 +1173,8 @@ func funcSgn(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Ve } // === timestamp(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcTimestamp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec := vals[0].(Vector) +func funcTimestamp(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec := vectorVals[0] for _, el := range vec { if !enh.enableDelayedNameRemoval { el.Metric = el.Metric.DropReserved(schema.IsMetadataLabel) @@ -1245,8 +1250,8 @@ func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept f } // === deriv(node parser.ValueTypeMatrix) (Vector, Annotations) === -func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - samples := vals[0].(Matrix)[0] +func funcDeriv(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + samples := matrixVal[0] metricName := samples.Metric.Get(labels.MetricName) // No sense in trying to compute a derivative without at least two float points. 
@@ -1270,9 +1275,9 @@ func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper } // === predict_linear(node parser.ValueTypeMatrix, k parser.ValueTypeScalar) (Vector, Annotations) === -func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - samples := vals[0].(Matrix)[0] - duration := vals[1].(Vector)[0].F +func funcPredictLinear(vectorVals []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + samples := matrixVal[0] + duration := vectorVals[0][0].F metricName := samples.Metric.Get(labels.MetricName) // No sense in trying to predict anything without at least two float points. @@ -1292,8 +1297,8 @@ func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNo return append(enh.Out, Sample{F: slope*duration + intercept}), nil } -func simpleHistogramFunc(vals []parser.Value, enh *EvalNodeHelper, f func(h *histogram.FloatHistogram) float64) Vector { - for _, el := range vals[0].(Vector) { +func simpleHistogramFunc(vectorVals []Vector, enh *EvalNodeHelper, f func(h *histogram.FloatHistogram) float64) Vector { + for _, el := range vectorVals[0] { if el.H != nil { // Process only histogram samples. 
if !enh.enableDelayedNameRemoval { el.Metric = el.Metric.DropMetricName() @@ -1309,28 +1314,28 @@ func simpleHistogramFunc(vals []parser.Value, enh *EvalNodeHelper, f func(h *his } // === histogram_count(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramCount(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 { +func funcHistogramCount(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleHistogramFunc(vectorVals, enh, func(h *histogram.FloatHistogram) float64 { return h.Count }), nil } // === histogram_sum(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramSum(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 { +func funcHistogramSum(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleHistogramFunc(vectorVals, enh, func(h *histogram.FloatHistogram) float64 { return h.Sum }), nil } // === histogram_avg(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramAvg(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 { +func funcHistogramAvg(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleHistogramFunc(vectorVals, enh, func(h *histogram.FloatHistogram) float64 { return h.Sum / h.Count }), nil } -func histogramVariance(vals []parser.Value, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) { - return simpleHistogramFunc(vals, enh, func(h 
*histogram.FloatHistogram) float64 { +func histogramVariance(vectorVals []Vector, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) { + return simpleHistogramFunc(vectorVals, enh, func(h *histogram.FloatHistogram) float64 { mean := h.Sum / h.Count var variance, cVariance float64 it := h.AllBucketIterator() @@ -1367,20 +1372,20 @@ func histogramVariance(vals []parser.Value, enh *EvalNodeHelper, varianceToResul } // === histogram_stddev(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramStdDev(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return histogramVariance(vals, enh, math.Sqrt) +func funcHistogramStdDev(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return histogramVariance(vectorVals, enh, math.Sqrt) } // === histogram_stdvar(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramStdVar(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return histogramVariance(vals, enh, nil) +func funcHistogramStdVar(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return histogramVariance(vectorVals, enh, nil) } // === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - lower := vals[0].(Vector)[0].F - upper := vals[1].(Vector)[0].F - inVec := vals[2].(Vector) +func funcHistogramFraction(vectorVals []Vector, _ Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + lower := vectorVals[0][0].F + upper := vectorVals[1][0].F + inVec := vectorVals[2] annos := enh.resetHistograms(inVec, args[2]) @@ -1422,9 +1427,9 @@ func 
funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev } // === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - q := vals[0].(Vector)[0].F - inVec := vals[1].(Vector) +func funcHistogramQuantile(vectorVals []Vector, _ Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + q := vectorVals[0][0].F + inVec := vectorVals[1] var annos annotations.Annotations if math.IsNaN(q) || q < 0 || q > 1 { @@ -1474,9 +1479,9 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev } // === resets(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcResets(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - floats := vals[0].(Matrix)[0].Floats - histograms := vals[0].(Matrix)[0].Histograms +func funcResets(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + floats := matrixVal[0].Floats + histograms := matrixVal[0].Histograms resets := 0 if len(floats) == 0 && len(histograms) == 0 { return enh.Out, nil @@ -1519,9 +1524,9 @@ func funcResets(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) } // === changes(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcChanges(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - floats := vals[0].(Matrix)[0].Floats - histograms := vals[0].(Matrix)[0].Histograms +func funcChanges(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + floats := matrixVal[0].Floats + histograms := matrixVal[0].Histograms changes := 0 if len(floats) == 0 && len(histograms) == 0 { return enh.Out, nil @@ -1576,7 +1581,7 @@ func (ev *evaluator) 
evalLabelReplace(ctx context.Context, args parser.Expressio if err != nil { panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr)) } - if !labels.IsValidLabelName(dst, ev.validationScheme) { + if !model.LabelName(dst).IsValid() { panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst)) } @@ -1607,11 +1612,11 @@ func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressio } // === Vector(s Scalar) (Vector, Annotations) === -func funcVector(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcVector(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{ Metric: labels.Labels{}, - F: vals[0].(Vector)[0].F, + F: vectorVals[0][0].F, }), nil } @@ -1624,12 +1629,12 @@ func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions) ) for i := 3; i < len(args); i++ { src := stringFromArg(args[i]) - if !labels.IsValidLabelName(src, ev.validationScheme) { + if !model.LabelName(src).IsValid() { panic(fmt.Errorf("invalid source label name in label_join(): %s", src)) } srcLabels[i-3] = src } - if !labels.IsValidLabelName(dst, ev.validationScheme) { + if !model.LabelName(dst).IsValid() { panic(fmt.Errorf("invalid destination label name in label_join(): %s", dst)) } @@ -1661,8 +1666,8 @@ func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions) } // Common code for date related functions. 
-func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) float64) Vector { - if len(vals) == 0 { +func dateWrapper(vectorVals []Vector, enh *EvalNodeHelper, f func(time.Time) float64) Vector { + if len(vectorVals) == 0 { return append(enh.Out, Sample{ Metric: labels.Labels{}, @@ -1670,7 +1675,7 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo }) } - for _, el := range vals[0].(Vector) { + for _, el := range vectorVals[0] { if el.H != nil { // Ignore histogram sample. continue @@ -1689,57 +1694,57 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo } // === days_in_month(v Vector) Scalar === -func funcDaysInMonth(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return dateWrapper(vals, enh, func(t time.Time) float64 { +func funcDaysInMonth(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vectorVals, enh, func(t time.Time) float64 { return float64(32 - time.Date(t.Year(), t.Month(), 32, 0, 0, 0, 0, time.UTC).Day()) }), nil } // === day_of_month(v Vector) Scalar === -func funcDayOfMonth(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return dateWrapper(vals, enh, func(t time.Time) float64 { +func funcDayOfMonth(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vectorVals, enh, func(t time.Time) float64 { return float64(t.Day()) }), nil } // === day_of_week(v Vector) Scalar === -func funcDayOfWeek(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return dateWrapper(vals, enh, func(t time.Time) float64 { +func funcDayOfWeek(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vectorVals, 
enh, func(t time.Time) float64 { return float64(t.Weekday()) }), nil } // === day_of_year(v Vector) Scalar === -func funcDayOfYear(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return dateWrapper(vals, enh, func(t time.Time) float64 { +func funcDayOfYear(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vectorVals, enh, func(t time.Time) float64 { return float64(t.YearDay()) }), nil } // === hour(v Vector) Scalar === -func funcHour(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return dateWrapper(vals, enh, func(t time.Time) float64 { +func funcHour(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vectorVals, enh, func(t time.Time) float64 { return float64(t.Hour()) }), nil } // === minute(v Vector) Scalar === -func funcMinute(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return dateWrapper(vals, enh, func(t time.Time) float64 { +func funcMinute(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vectorVals, enh, func(t time.Time) float64 { return float64(t.Minute()) }), nil } // === month(v Vector) Scalar === -func funcMonth(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return dateWrapper(vals, enh, func(t time.Time) float64 { +func funcMonth(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vectorVals, enh, func(t time.Time) float64 { return float64(t.Month()) }), nil } // === year(v Vector) Scalar === -func funcYear(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return 
dateWrapper(vals, enh, func(t time.Time) float64 { +func funcYear(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vectorVals, enh, func(t time.Time) float64 { return float64(t.Year()) }), nil } diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y index 3747e01f53c..e7e16cd0330 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y @@ -23,6 +23,8 @@ import ( "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/promql/parser/posrange" + + "github.com/prometheus/common/model" ) %} @@ -376,14 +378,14 @@ grouping_label_list: grouping_label : maybe_label { - if !labels.IsValidLabelName($1.Val, yylex.(*parser).validationScheme) { + if !model.LabelName($1.Val).IsValid() { yylex.(*parser).addParseErrf($1.PositionRange(),"invalid label name for grouping: %q", $1.Val) } $$ = $1 } | STRING { unquoted := yylex.(*parser).unquoteString($1.Val) - if !labels.IsValidLabelName(unquoted, yylex.(*parser).validationScheme) { + if !model.LabelName(unquoted).IsValid() { yylex.(*parser).addParseErrf($1.PositionRange(),"invalid label name for grouping: %q", unquoted) } $$ = $1 diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go index 17fd159e5a6..e93d1b3de6b 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go @@ -12,6 +12,8 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/promql/parser/posrange" + 
+ "github.com/prometheus/common/model" ) type yySymType struct { @@ -1325,7 +1327,7 @@ yydefault: case 59: yyDollar = yyS[yypt-1 : yypt+1] { - if !labels.IsValidLabelName(yyDollar[1].item.Val, yylex.(*parser).validationScheme) { + if !model.LabelName(yyDollar[1].item.Val).IsValid() { yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid label name for grouping: %q", yyDollar[1].item.Val) } yyVAL.item = yyDollar[1].item @@ -1334,7 +1336,7 @@ yydefault: yyDollar = yyS[yypt-1 : yypt+1] { unquoted := yylex.(*parser).unquoteString(yyDollar[1].item.Val) - if !labels.IsValidLabelName(unquoted, yylex.(*parser).validationScheme) { + if !model.LabelName(unquoted).IsValid() { yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid label name for grouping: %q", unquoted) } yyVAL.item = yyDollar[1].item diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go index fa8cc2ec852..03c6a8446a0 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go @@ -71,8 +71,6 @@ type parser struct { generatedParserResult interface{} parseErrors ParseErrors - - validationScheme model.ValidationScheme } type Opt func(p *parser) @@ -83,14 +81,6 @@ func WithFunctions(functions map[string]*Function) Opt { } } -// WithValidationScheme controls how metric/label names are validated. -// Defaults to UTF8Validation. -func WithValidationScheme(scheme model.ValidationScheme) Opt { - return func(p *parser) { - p.validationScheme = scheme - } -} - // NewParser returns a new parser. 
func NewParser(input string, opts ...Opt) *parser { //nolint:revive // unexported-return p := parserPool.Get().(*parser) @@ -100,7 +90,6 @@ func NewParser(input string, opts ...Opt) *parser { //nolint:revive // unexporte p.parseErrors = nil p.generatedParserResult = nil p.closingParens = make([]posrange.Pos, 0) - p.validationScheme = model.UTF8Validation // Clear lexer struct before reusing. p.lex = Lexer{ diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/aggregators.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/aggregators.test index 2fee20e630c..576b36868f1 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/aggregators.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/aggregators.test @@ -232,30 +232,38 @@ load 5m http_requests_histogram{job="api-server", instance="3", group="canary"} {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}} eval instant at 0m max(http_requests) + expect no_info {} 4 # The histogram is ignored here so the result doesn't change but it has an info annotation now. -eval_info instant at 0m max({job="api-server"}) +eval instant at 0m max({job="api-server"}) + expect info {} 4 # The histogram is ignored here so there is no result but it has an info annotation now. -eval_info instant at 0m max(http_requests_histogram) +eval instant at 0m max(http_requests_histogram) + expect info eval instant at 0m min(http_requests) + expect no_info {} 1 # The histogram is ignored here so the result doesn't change but it has an info annotation now. -eval_info instant at 0m min({job="api-server"}) +eval instant at 0m min({job="api-server"}) + expect info {} 1 # The histogram is ignored here so there is no result but it has an info annotation now. 
-eval_info instant at 0m min(http_requests_histogram) +eval instant at 0m min(http_requests_histogram) + expect info eval instant at 0m max by (group) (http_requests) + expect no_info {group="production"} 2 {group="canary"} 4 eval instant at 0m min by (group) (http_requests) + expect no_info {group="production"} 1 {group="canary"} 3 @@ -276,26 +284,31 @@ load 5m http_requests_histogram{job="api-server", instance="3", group="production"} {{schema:0 sum:20 count:20}}x11 foo 1+1x9 3 -eval_ordered instant at 50m topk(3, http_requests) +eval instant at 50m topk(3, http_requests) + expect ordered http_requests{group="canary", instance="1", job="app-server"} 800 http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="production", instance="1", job="app-server"} 600 -eval_ordered instant at 50m topk((3), (http_requests)) +eval instant at 50m topk((3), (http_requests)) + expect ordered http_requests{group="canary", instance="1", job="app-server"} 800 http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="production", instance="1", job="app-server"} 600 -eval_ordered instant at 50m topk(5, http_requests{group="canary",job="app-server"}) +eval instant at 50m topk(5, http_requests{group="canary",job="app-server"}) + expect ordered http_requests{group="canary", instance="1", job="app-server"} 800 http_requests{group="canary", instance="0", job="app-server"} 700 -eval_ordered instant at 50m bottomk(3, http_requests) +eval instant at 50m bottomk(3, http_requests) + expect ordered http_requests{group="production", instance="0", job="api-server"} 100 http_requests{group="production", instance="1", job="api-server"} 200 http_requests{group="canary", instance="0", job="api-server"} 300 -eval_ordered instant at 50m bottomk(5, http_requests{group="canary",job="app-server"}) +eval instant at 50m bottomk(5, http_requests{group="canary",job="app-server"}) + expect ordered http_requests{group="canary", instance="0", 
job="app-server"} 700 http_requests{group="canary", instance="1", job="app-server"} 800 @@ -309,33 +322,39 @@ eval instant at 50m bottomk by (group) (2, http_requests) http_requests{group="production", instance="0", job="api-server"} 100 http_requests{group="production", instance="1", job="api-server"} 200 -eval_ordered instant at 50m bottomk by (group) (2, http_requests{group="production"}) +eval instant at 50m bottomk by (group) (2, http_requests{group="production"}) + expect ordered http_requests{group="production", instance="0", job="api-server"} 100 http_requests{group="production", instance="1", job="api-server"} 200 # Test NaN is sorted away from the top/bottom. -eval_ordered instant at 50m topk(3, http_requests{job="api-server",group="production"}) +eval instant at 50m topk(3, http_requests{job="api-server",group="production"}) + expect ordered http_requests{job="api-server", instance="1", group="production"} 200 http_requests{job="api-server", instance="0", group="production"} 100 http_requests{job="api-server", instance="2", group="production"} NaN -eval_ordered instant at 50m bottomk(3, http_requests{job="api-server",group="production"}) +eval instant at 50m bottomk(3, http_requests{job="api-server",group="production"}) + expect ordered http_requests{job="api-server", instance="0", group="production"} 100 http_requests{job="api-server", instance="1", group="production"} 200 http_requests{job="api-server", instance="2", group="production"} NaN # Test topk and bottomk allocate min(k, input_vector) for results vector -eval_ordered instant at 50m bottomk(9999999999, http_requests{job="app-server",group="canary"}) +eval instant at 50m bottomk(9999999999, http_requests{job="app-server",group="canary"}) + expect ordered http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="canary", instance="1", job="app-server"} 800 -eval_ordered instant at 50m topk(9999999999, http_requests{job="api-server",group="production"}) +eval instant 
at 50m topk(9999999999, http_requests{job="api-server",group="production"}) + expect ordered http_requests{job="api-server", instance="1", group="production"} 200 http_requests{job="api-server", instance="0", group="production"} 100 http_requests{job="api-server", instance="2", group="production"} NaN # Bug #5276. -eval_ordered instant at 50m topk(scalar(foo), http_requests) +eval instant at 50m topk(scalar(foo), http_requests) + expect ordered http_requests{group="canary", instance="1", job="app-server"} 800 http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="production", instance="1", job="app-server"} 600 @@ -348,42 +367,54 @@ eval range from 0m to 50m step 5m count(bottomk(scalar(foo), http_requests)) {} 1 2 3 4 5 6 7 8 9 9 3 # Tests for histogram: should ignore histograms. -eval_info instant at 50m topk(100, http_requests_histogram) +eval instant at 50m topk(100, http_requests_histogram) + expect info #empty -eval_info range from 0 to 50m step 5m topk(100, http_requests_histogram) +eval range from 0 to 50m step 5m topk(100, http_requests_histogram) + expect info #empty -eval_info instant at 50m topk(1, {__name__=~"http_requests(_histogram)?"}) +eval instant at 50m topk(1, {__name__=~"http_requests(_histogram)?"}) + expect info {__name__="http_requests", group="canary", instance="1", job="app-server"} 800 -eval_info instant at 50m count(topk(1000, {__name__=~"http_requests(_histogram)?"})) +eval instant at 50m count(topk(1000, {__name__=~"http_requests(_histogram)?"})) + expect info {} 9 -eval_info range from 0 to 50m step 5m count(topk(1000, {__name__=~"http_requests(_histogram)?"})) +eval range from 0 to 50m step 5m count(topk(1000, {__name__=~"http_requests(_histogram)?"})) + expect info {} 9x10 -eval_info instant at 50m topk by (instance) (1, {__name__=~"http_requests(_histogram)?"}) +eval instant at 50m topk by (instance) (1, {__name__=~"http_requests(_histogram)?"}) + expect info {__name__="http_requests", 
group="canary", instance="0", job="app-server"} 700 {__name__="http_requests", group="canary", instance="1", job="app-server"} 800 {__name__="http_requests", group="production", instance="2", job="api-server"} NaN -eval_info instant at 50m bottomk(100, http_requests_histogram) +eval instant at 50m bottomk(100, http_requests_histogram) + expect info #empty -eval_info range from 0 to 50m step 5m bottomk(100, http_requests_histogram) +eval range from 0 to 50m step 5m bottomk(100, http_requests_histogram) + expect info #empty -eval_info instant at 50m bottomk(1, {__name__=~"http_requests(_histogram)?"}) +eval instant at 50m bottomk(1, {__name__=~"http_requests(_histogram)?"}) + expect info {__name__="http_requests", group="production", instance="0", job="api-server"} 100 -eval_info instant at 50m count(bottomk(1000, {__name__=~"http_requests(_histogram)?"})) +eval instant at 50m count(bottomk(1000, {__name__=~"http_requests(_histogram)?"})) + expect info {} 9 -eval_info range from 0 to 50m step 5m count(bottomk(1000, {__name__=~"http_requests(_histogram)?"})) +eval range from 0 to 50m step 5m count(bottomk(1000, {__name__=~"http_requests(_histogram)?"})) + expect info {} 9x10 -eval_info instant at 50m bottomk by (instance) (1, {__name__=~"http_requests(_histogram)?"}) +eval instant at 50m bottomk by (instance) (1, {__name__=~"http_requests(_histogram)?"}) + expect info {__name__="http_requests", group="production", instance="0", job="api-server"} 100 {__name__="http_requests", group="production", instance="1", job="api-server"} 200 {__name__="http_requests", group="production", instance="2", job="api-server"} NaN @@ -447,8 +478,8 @@ eval instant at 1m count_values by (job, group)("job", version) {job="{count:20, sum:10, [-2,-1):2, [-1,-0.5):1, [-0.001,0.001]:2, (0.5,1]:1, (1,2]:2}", group="canary"} 2 # Test an invalid label value. 
-eval_fail instant at 0 count_values("a\xc5z", version) - expected_fail_message invalid label name "a\xc5z" +eval instant at 0 count_values("a\xc5z", version) + expect fail msg:invalid label name "a\xc5z" # Tests for quantile. clear @@ -462,46 +493,67 @@ load 10s data{test="uneven samples",point="a"} 0 data{test="uneven samples",point="b"} 1 data{test="uneven samples",point="c"} 4 + data{test="NaN sample",point="a"} 0 + data{test="NaN sample",point="b"} 1 + data{test="NaN sample",point="c"} NaN data_histogram{test="histogram sample", point="c"} {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}} foo 0 1 0 1 0 1 0.8 +# 80th percentile. +# The NaN sample is treated as the smallest possible value. eval instant at 1m quantile without(point)(0.8, data) + expect no_info {test="two samples"} 0.8 {test="three samples"} 1.6 {test="uneven samples"} 2.8 + {test="NaN sample"} 0.6 + +# 20th percentile. +# A quantile between NaN and 0 is interpolated as NaN. +eval instant at 1m quantile without(point)(0.2, data) + {test="two samples"} 0.2 + {test="three samples"} 0.4 + {test="uneven samples"} 0.4 + {test="NaN sample"} NaN # The histogram is ignored here so the result doesn't change but it has an info annotation now. -eval_info instant at 1m quantile without(point)(0.8, {__name__=~"data(_histogram)?"}) +eval instant at 1m quantile without(point)(0.8, {__name__=~"data(_histogram)?"}) + expect info {test="two samples"} 0.8 {test="three samples"} 1.6 {test="uneven samples"} 2.8 + {test="NaN sample"} 0.6 # The histogram is ignored here so there is no result but it has an info annotation now. -eval_info instant at 1m quantile(0.8, data_histogram) +eval instant at 1m quantile(0.8, data_histogram) + expect info # Bug #5276. 
eval instant at 1m quantile without(point)(scalar(foo), data) {test="two samples"} 0.8 {test="three samples"} 1.6 {test="uneven samples"} 2.8 - + {test="NaN sample"} 0.6 eval instant at 1m quantile without(point)((scalar(foo)), data) {test="two samples"} 0.8 {test="three samples"} 1.6 {test="uneven samples"} 2.8 + {test="NaN sample"} 0.6 eval instant at 1m quantile without(point)(NaN, data) expect warn msg: PromQL warning: quantile value should be between 0 and 1, got NaN - {test="two samples"} NaN - {test="three samples"} NaN - {test="uneven samples"} NaN + {test="two samples"} NaN + {test="three samples"} NaN + {test="uneven samples"} NaN + {test="NaN sample"} NaN # Bug #15971. eval range from 0m to 1m step 10s quantile without(point) (scalar(foo), data) {test="two samples"} 0 1 0 1 0 1 0.8 {test="three samples"} 0 2 0 2 0 2 1.6 {test="uneven samples"} 0 4 0 4 0 4 2.8 + {test="NaN sample"} NaN 1 NaN 1 NaN 1 0.6 # Tests for group. clear @@ -745,22 +797,28 @@ load 5m series{label="c"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}} # The histogram is ignored here so the result doesn't change but it has an info annotation now. -eval_info instant at 0m stddev(series) +eval instant at 0m stddev(series) + expect info {} 0.5 -eval_info instant at 0m stdvar(series) +eval instant at 0m stdvar(series) + expect info {} 0.25 # The histogram is ignored here so there is no result but it has an info annotation now. 
-eval_info instant at 0m stddev({label="c"}) +eval instant at 0m stddev({label="c"}) + expect info -eval_info instant at 0m stdvar({label="c"}) +eval instant at 0m stdvar({label="c"}) + expect info -eval_info instant at 0m stddev by (label) (series) +eval instant at 0m stddev by (label) (series) + expect info {label="a"} 0 {label="b"} 0 -eval_info instant at 0m stdvar by (label) (series) +eval instant at 0m stdvar by (label) (series) + expect info {label="a"} 0 {label="b"} 0 @@ -771,17 +829,21 @@ load 5m series{label="b"} 1 series{label="c"} 2 -eval_info instant at 0m stddev(series) +eval instant at 0m stddev(series) + expect info {} 0.5 -eval_info instant at 0m stdvar(series) +eval instant at 0m stdvar(series) + expect info {} 0.25 -eval_info instant at 0m stddev by (label) (series) +eval instant at 0m stddev by (label) (series) + expect info {label="b"} 0 {label="c"} 0 -eval_info instant at 0m stdvar by (label) (series) +eval instant at 0m stdvar by (label) (series) + expect info {label="b"} 0 {label="c"} 0 diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/at_modifier.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/at_modifier.test index 1ad301bdb7d..4091f7eabf2 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/at_modifier.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/at_modifier.test @@ -90,8 +90,7 @@ eval instant at 25s sum_over_time(metric{job="1"}[100] offset 50s @ 100) eval instant at 25s metric{job="1"} @ 50 + metric{job="1"} @ 100 {job="1"} 15 -# Note that this triggers an info annotation because we are rate'ing a metric that does not end in `_total`. 
-eval_info instant at 25s rate(metric{job="1"}[100s] @ 100) + label_replace(rate(metric{job="2"}[123s] @ 200), "job", "1", "", "") +eval instant at 25s rate(metric{job="1"}[100s] @ 100) + label_replace(rate(metric{job="2"}[123s] @ 200), "job", "1", "", "") {job="1"} 0.3 eval instant at 25s sum_over_time(metric{job="1"}[100s] @ 100) + label_replace(sum_over_time(metric{job="2"}[100s] @ 100), "job", "1", "", "") diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/collision.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/collision.test index 4dcdfa4ddf7..8addf02be9b 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/collision.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/collision.test @@ -17,6 +17,7 @@ load 5m testmetric1{src="a",dst="b"} 0 testmetric2{src="a",dst="b"} 1 -eval_fail instant at 0m ceil({__name__=~'testmetric1|testmetric2'}) +eval instant at 0m ceil({__name__=~'testmetric1|testmetric2'}) + expect fail clear diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test index 037e40923ae..b1eda909f83 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test @@ -228,37 +228,48 @@ load 5m http_requests_histogram{path="/g"} 0 0 {{schema:-53 sum:3 count:3 custom_values:[1] buckets:[3]}} {{schema:-53 sum:3 count:3 custom_values:[5 10] buckets:[3]}} eval instant at 50m irate(http_requests_total[50m]) + expect no_warn {path="/foo"} .03333333333333333333 {path="/bar"} .03333333333333333333 # Counter reset. 
eval instant at 30m irate(http_requests_total[50m]) + expect no_warn {path="/foo"} .03333333333333333333 {path="/bar"} 0 eval range from 0 to 20m step 5m irate(http_requests_nan[15m1s]) + expect no_warn {} _ NaN NaN NaN 0.02 eval instant at 20m irate(http_requests_histogram{path="/a"}[20m]) + expect no_warn {path="/a"} {{sum:0.01 count:0.01 counter_reset_hint:gauge}} eval instant at 20m irate(http_requests_histogram{path="/b"}[20m]) + expect no_warn {path="/b"} {{sum:0.01 count:0.01 counter_reset_hint:gauge}} eval instant at 20m irate(http_requests_histogram{path="/b"}[6m]) + expect no_warn -eval_warn instant at 20m irate(http_requests_histogram{path="/c"}[20m]) +eval instant at 20m irate(http_requests_histogram{path="/c"}[20m]) + expect warn {path="/c"} {{sum:0.01 count:0.01 counter_reset_hint:gauge}} -eval_warn instant at 20m irate(http_requests_histogram{path="/d"}[20m]) +eval instant at 20m irate(http_requests_histogram{path="/d"}[20m]) + expect warn {path="/d"} {{sum:0.01 count:0.01 counter_reset_hint:gauge}} -eval_warn instant at 20m irate(http_requests_histogram{path="/e"}[20m]) +eval instant at 20m irate(http_requests_histogram{path="/e"}[20m]) + expect warn eval instant at 20m irate(http_requests_histogram{path="/f"}[20m]) + expect no_warn {path="/f"} {{schema:-53 sum:0.01 count:0.01 custom_values:[5 10] buckets:[0.01]}} eval instant at 20m irate(http_requests_histogram{path="/g"}[20m]) + expect no_warn {path="/g"} {{schema:-53 sum:0.01 count:0.01 custom_values:[5 10] buckets:[0.01]}} clear @@ -272,18 +283,22 @@ load 5m http_requests_mix{path="/foo"} 0 50 100 {{schema:0 sum:0 count:0 buckets:[0 0 0] counter_reset_hint:gauge}} {{schema:0 sum:1 count:2 buckets:[1 1 1] counter_reset_hint:gauge}} eval instant at 20m delta(http_requests[20m]) + expect no_warn {path="/foo"} 200 {path="/bar"} -200 eval instant at 20m delta(http_requests_gauge[20m]) + expect no_warn {path="/foo"} {{schema:0 sum:4 count:8 buckets:[4 4 4]}} # delta emits warn annotation for 
non-gauge histogram types. -eval_warn instant at 20m delta(http_requests_counter[20m]) +eval instant at 20m delta(http_requests_counter[20m]) + expect warn {path="/foo"} {{schema:0 sum:4 count:8 buckets:[4 4 4]}} # delta emits warn annotation for mix of histogram and floats. -eval_warn instant at 20m delta(http_requests_mix[20m]) +eval instant at 20m delta(http_requests_mix[20m]) + expect warn #empty clear @@ -302,31 +317,41 @@ load 5m http_requests_histogram{path="/g"} 0 0 {{schema:-53 sum:1 count:1 custom_values:[1] buckets:[2] counter_reset_hint:gauge}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1] counter_reset_hint:gauge}} eval instant at 20m idelta(http_requests[20m]) + expect no_warn {path="/foo"} 50 {path="/bar"} -50 eval range from 0 to 20m step 5m idelta(http_requests_nan[15m1s]) + expect no_warn {} _ NaN NaN NaN 6 eval instant at 20m idelta(http_requests_histogram{path="/a"}[20m]) + expect no_warn {path="/a"} {{sum:1 count:3 counter_reset_hint:gauge}} eval instant at 20m idelta(http_requests_histogram{path="/b"}[20m]) + expect no_warn {path="/b"} {{sum:1 count:1 counter_reset_hint:gauge}} eval instant at 20m idelta(http_requests_histogram{path="/b"}[6m]) + expect no_warn -eval_warn instant at 20m idelta(http_requests_histogram{path="/c"}[20m]) +eval instant at 20m idelta(http_requests_histogram{path="/c"}[20m]) + expect warn {path="/c"} {{sum:1 count:1 counter_reset_hint:gauge}} -eval_warn instant at 20m idelta(http_requests_histogram{path="/d"}[20m]) +eval instant at 20m idelta(http_requests_histogram{path="/d"}[20m]) + expect warn {path="/d"} {{sum:1 count:1 counter_reset_hint:gauge}} -eval_warn instant at 20m idelta(http_requests_histogram{path="/e"}[20m]) +eval instant at 20m idelta(http_requests_histogram{path="/e"}[20m]) + expect warn -eval_warn instant at 20m idelta(http_requests_histogram{path="/f"}[20m]) +eval instant at 20m idelta(http_requests_histogram{path="/f"}[20m]) + expect warn -eval_warn instant at 20m 
idelta(http_requests_histogram{path="/g"}[20m]) +eval instant at 20m idelta(http_requests_histogram{path="/g"}[20m]) + expect warn clear @@ -341,28 +366,36 @@ load 5m # deriv should return the same as rate in simple cases. eval instant at 50m rate(http_requests_total{group="canary", instance="1", job="app-server"}[50m]) + expect no_info {group="canary", instance="1", job="app-server"} 0.26666666666666666 eval instant at 50m deriv(http_requests_total{group="canary", instance="1", job="app-server"}[50m]) + expect no_info {group="canary", instance="1", job="app-server"} 0.26666666666666666 # deriv should return correct result. eval instant at 50m deriv(testcounter_reset_middle_total[100m]) + expect no_info {} 0.010606060606060607 # deriv should ignore histograms in a mixed range of floats and histograms, flagged by an info annotation. -eval_info instant at 110m deriv(http_requests_mix{group="canary", instance="1", job="app-server"}[110m]) +eval instant at 110m deriv(http_requests_mix{group="canary", instance="1", job="app-server"}[110m]) + expect info {group="canary", instance="1", job="app-server"} 0.26666666666666666 -eval_info instant at 100m deriv(testcounter_reset_middle_mix[110m]) +eval instant at 100m deriv(testcounter_reset_middle_mix[110m]) + expect info {} 0.010606060606060607 # deriv should silently ignore ranges consisting only of histograms. eval instant at 50m deriv(http_requests_histogram[60m]) + expect no_info + expect no_warn #empty # deriv should return NaN in case of +Inf or -Inf found. eval instant at 100m deriv(http_requests_inf[100m]) + expect no_info {job="app-server", instance="1", group="canary"} NaN # predict_linear should return correct result. 
@@ -380,35 +413,45 @@ eval instant at 100m deriv(http_requests_inf[100m]) # intercept at t=3000: 38.63636363636364 # intercept at t=3000+3600: 76.81818181818181 eval instant at 50m predict_linear(testcounter_reset_middle_total[50m], 3600) + expect no_info {} 70 eval instant at 50m predict_linear(testcounter_reset_middle_total[50m], 1h) + expect no_info {} 70 # intercept at t = 3000+3600 = 6600 eval instant at 50m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600) + expect no_info {} 76.81818181818181 eval instant at 50m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 1h) + expect no_info {} 76.81818181818181 # intercept at t = 600+3600 = 4200 eval instant at 10m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600) + expect no_info {} 51.36363636363637 # intercept at t = 4200+3600 = 7800 eval instant at 70m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600) + expect no_info {} 89.54545454545455 # predict_linear should ignore histograms in a mixed range of floats and histograms, flagged by an info annotation. -eval_info instant at 60m predict_linear(testcounter_reset_middle_mix[60m], 3000) +eval instant at 60m predict_linear(testcounter_reset_middle_mix[60m], 3000) + expect info {} 70 -eval_info instant at 60m predict_linear(testcounter_reset_middle_mix[60m], 50m) +eval instant at 60m predict_linear(testcounter_reset_middle_mix[60m], 50m) + expect info {} 70 # predict_linear should silently ignore ranges consisting only of histograms. eval instant at 60m predict_linear(http_requests_histogram[60m], 50m) + expect no_info + expect no_warn #empty # predict_linear should return NaN in case of +Inf or -Inf found. @@ -471,13 +514,16 @@ eval instant at 0m label_replace(testmetric, "dst", "", "dst", ".*") testmetric{src="source-value-20"} 1 # label_replace fails when the regex is invalid. 
-eval_fail instant at 0m label_replace(testmetric, "dst", "value-$1", "src", "(.*") +eval instant at 0m label_replace(testmetric, "dst", "value-$1", "src", "(.*") + expect fail # label_replace fails when the destination label name is not a valid Prometheus label name. -eval_fail instant at 0m label_replace(testmetric, "\xff", "", "src", "(.*)") +eval instant at 0m label_replace(testmetric, "\xff", "", "src", "(.*)") + expect fail # label_replace fails when there would be duplicated identical output label sets. -eval_fail instant at 0m label_replace(testmetric, "src", "", "", "") +eval instant at 0m label_replace(testmetric, "src", "", "", "") + expect fail clear @@ -540,8 +586,8 @@ eval instant at 0m label_join(testmetric1, "dst", ", ", "src", "src1", "src2") testmetric1{src="foo",src1="bar",src2="foobar",dst="foo, bar, foobar"} 0 testmetric1{src="fizz",src1="buzz",src2="fizzbuzz",dst="fizz, buzz, fizzbuzz"} 1 -eval_fail instant at 0m label_join(dup, "label", "", "this") - expected_fail_message vector cannot contain metrics with the same labelset +eval instant at 0m label_join(dup, "label", "", "this") + expect fail msg:vector cannot contain metrics with the same labelset clear @@ -652,7 +698,8 @@ load 5m http_requests{job="app-server", instance="1", group="canary"} 0+80x10 http_requests{job="app-server", instance="2", group="canary"} {{schema:0 sum:1 count:1}}x15 -eval_ordered instant at 50m sort(http_requests) +eval instant at 50m sort(http_requests) + expect ordered http_requests{group="production", instance="0", job="api-server"} 100 http_requests{group="production", instance="1", job="api-server"} 200 http_requests{group="canary", instance="0", job="api-server"} 300 @@ -663,7 +710,8 @@ eval_ordered instant at 50m sort(http_requests) http_requests{group="canary", instance="1", job="app-server"} 800 http_requests{group="canary", instance="2", job="api-server"} NaN -eval_ordered instant at 50m sort_desc(http_requests) +eval instant at 50m sort_desc(http_requests) 
+ expect ordered http_requests{group="canary", instance="1", job="app-server"} 800 http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="production", instance="1", job="app-server"} 600 @@ -701,7 +749,8 @@ load 5m node_uname_info{job="node_exporter", instance="4m5", release="1.11.3"} 0+10x10 node_uname_info{job="node_exporter", instance="4m1000", release="1.111.3"} 0+10x10 -eval_ordered instant at 50m sort_by_label(http_requests, "instance") +eval instant at 50m sort_by_label(http_requests, "instance") + expect ordered http_requests{group="canary", instance="0", job="api-server"} 300 http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="production", instance="0", job="api-server"} 100 @@ -713,7 +762,8 @@ eval_ordered instant at 50m sort_by_label(http_requests, "instance") http_requests{group="canary", instance="2", job="api-server"} NaN http_requests{group="production", instance="2", job="api-server"} 100 -eval_ordered instant at 50m sort_by_label(http_requests, "instance", "group") +eval instant at 50m sort_by_label(http_requests, "instance", "group") + expect ordered http_requests{group="canary", instance="0", job="api-server"} 300 http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="production", instance="0", job="api-server"} 100 @@ -725,7 +775,8 @@ eval_ordered instant at 50m sort_by_label(http_requests, "instance", "group") http_requests{group="canary", instance="2", job="api-server"} NaN http_requests{group="production", instance="2", job="api-server"} 100 -eval_ordered instant at 50m sort_by_label(http_requests, "instance", "group") +eval instant at 50m sort_by_label(http_requests, "instance", "group") + expect ordered http_requests{group="canary", instance="0", job="api-server"} 300 http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="production", instance="0", job="api-server"} 100 @@ -737,7 +788,8 @@ eval_ordered instant 
at 50m sort_by_label(http_requests, "instance", "group") http_requests{group="canary", instance="2", job="api-server"} NaN http_requests{group="production", instance="2", job="api-server"} 100 -eval_ordered instant at 50m sort_by_label(http_requests, "group", "instance", "job") +eval instant at 50m sort_by_label(http_requests, "group", "instance", "job") + expect ordered http_requests{group="canary", instance="0", job="api-server"} 300 http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="canary", instance="1", job="api-server"} 400 @@ -749,7 +801,8 @@ eval_ordered instant at 50m sort_by_label(http_requests, "group", "instance", "j http_requests{group="production", instance="1", job="app-server"} 600 http_requests{group="production", instance="2", job="api-server"} 100 -eval_ordered instant at 50m sort_by_label(http_requests, "job", "instance", "group") +eval instant at 50m sort_by_label(http_requests, "job", "instance", "group") + expect ordered http_requests{group="canary", instance="0", job="api-server"} 300 http_requests{group="production", instance="0", job="api-server"} 100 http_requests{group="canary", instance="1", job="api-server"} 400 @@ -761,7 +814,8 @@ eval_ordered instant at 50m sort_by_label(http_requests, "job", "instance", "gro http_requests{group="canary", instance="1", job="app-server"} 800 http_requests{group="production", instance="1", job="app-server"} 600 -eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance") +eval instant at 50m sort_by_label_desc(http_requests, "instance") + expect ordered http_requests{group="production", instance="2", job="api-server"} 100 http_requests{group="canary", instance="2", job="api-server"} NaN http_requests{group="production", instance="1", job="app-server"} 600 @@ -773,7 +827,8 @@ eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance") http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="canary", 
instance="0", job="api-server"} 300 -eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance", "group") +eval instant at 50m sort_by_label_desc(http_requests, "instance", "group") + expect ordered http_requests{group="production", instance="2", job="api-server"} 100 http_requests{group="canary", instance="2", job="api-server"} NaN http_requests{group="production", instance="1", job="app-server"} 600 @@ -785,7 +840,8 @@ eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance", "group http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="canary", instance="0", job="api-server"} 300 -eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance", "group", "job") +eval instant at 50m sort_by_label_desc(http_requests, "instance", "group", "job") + expect ordered http_requests{group="production", instance="2", job="api-server"} 100 http_requests{group="canary", instance="2", job="api-server"} NaN http_requests{group="production", instance="1", job="app-server"} 600 @@ -797,7 +853,8 @@ eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance", "group http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="canary", instance="0", job="api-server"} 300 -eval_ordered instant at 50m sort_by_label(cpu_time_total, "cpu") +eval instant at 50m sort_by_label(cpu_time_total, "cpu") + expect ordered cpu_time_total{job="cpu", cpu="0"} 100 cpu_time_total{job="cpu", cpu="1"} 100 cpu_time_total{job="cpu", cpu="2"} 100 @@ -809,12 +866,14 @@ eval_ordered instant at 50m sort_by_label(cpu_time_total, "cpu") cpu_time_total{job="cpu", cpu="21"} 100 cpu_time_total{job="cpu", cpu="100"} 100 -eval_ordered instant at 50m sort_by_label(node_uname_info, "instance") +eval instant at 50m sort_by_label(node_uname_info, "instance") + expect ordered node_uname_info{job="node_exporter", instance="4m5", release="1.11.3"} 100 node_uname_info{job="node_exporter", instance="4m600", 
release="1.2.3"} 100 node_uname_info{job="node_exporter", instance="4m1000", release="1.111.3"} 100 -eval_ordered instant at 50m sort_by_label(node_uname_info, "release") +eval instant at 50m sort_by_label(node_uname_info, "release") + expect ordered node_uname_info{job="node_exporter", instance="4m600", release="1.2.3"} 100 node_uname_info{job="node_exporter", instance="4m5", release="1.11.3"} 100 node_uname_info{job="node_exporter", instance="4m1000", release="1.111.3"} 100 @@ -835,13 +894,15 @@ load 10s http_requests_histogram{job="api-server", instance="1", group="canary"} {{schema:0 count:1 sum:2}}x1000 eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1) + expect no_info {job="api-server", instance="0", group="production"} 8000 {job="api-server", instance="1", group="production"} 16000 {job="api-server", instance="0", group="canary"} 24000 {job="api-server", instance="1", group="canary"} 32000 # double_exponential_smoothing should ignore histograms in a mixed range of floats and histograms, flagged by an info annotation. -eval_info instant at 20010s double_exponential_smoothing(http_requests_mix[1m], 0.01, 0.1) +eval instant at 20010s double_exponential_smoothing(http_requests_mix[1m], 0.01, 0.1) + expect info {job="api-server", instance="0", group="production"} 30100 {job="api-server", instance="1", group="production"} 30200 {job="api-server", instance="0", group="canary"} 80300 @@ -849,6 +910,7 @@ eval_info instant at 20010s double_exponential_smoothing(http_requests_mix[1m], # double_exponential_smoothing should silently ignore ranges consisting only of histograms. eval instant at 10000s double_exponential_smoothing(http_requests_histogram[1m], 0.01, 0.1) + expect no_info #empty # negative trends @@ -994,10 +1056,12 @@ eval instant at 55s sum_over_time(metric11[1m])/count_over_time(metric11[1m]) {} NaN # Tests for samples with mix of floats and histograms. 
-eval_warn instant at 55s sum_over_time(metric12[1m]) +eval instant at 55s sum_over_time(metric12[1m]) + expect warn # no result. -eval_warn instant at 55s avg_over_time(metric12[1m]) +eval instant at 55s avg_over_time(metric12[1m]) + expect warn # no result. # Tests for samples with only histograms. @@ -1184,13 +1248,16 @@ eval instant at 1m stddev_over_time((metric[2m])) eval instant at 1m stddev_over_time(metric_histogram{type="only_histogram"}[2m]) #empty -eval_info instant at 1m stddev_over_time(metric_histogram{type="mix"}[2m]) +eval instant at 1m stddev_over_time(metric_histogram{type="mix"}[2m]) + expect info {type="mix"} 0 eval instant at 1m stdvar_over_time(metric_histogram{type="only_histogram"}[2m]) + expect no_info #empty -eval_info instant at 1m stdvar_over_time(metric_histogram{type="mix"}[2m]) +eval instant at 1m stdvar_over_time(metric_histogram{type="mix"}[2m]) + expect info {type="mix"} 0 # Tests for stddev_over_time and stdvar_over_time #4927. @@ -1212,12 +1279,15 @@ load 10s metric_histogram{type="mix"} 1 1 1 {{schema:1 sum:2 count:3}} {{schema:1 sum:2 count:3}} eval instant at 70s mad_over_time(metric[70s]) + expect no_info {} 1 eval instant at 70s mad_over_time(metric_histogram{type="only_histogram"}[70s]) + expect no_info #empty -eval_info instant at 70s mad_over_time(metric_histogram{type="mix"}[70s]) +eval instant at 70s mad_over_time(metric_histogram{type="mix"}[70s]) + expect info {type="mix"} 0 # Tests for ts_of_max_over_time and ts_of_min_over_time. Using odd scrape interval to test for rounding bugs. 
@@ -1261,49 +1331,69 @@ load 10s data_histogram{test="mix samples"} 0 1 2 {{schema:0 sum:1 count:2}}x2 eval instant at 1m quantile_over_time(0, data[2m]) + expect no_info + expect no_warn {test="two samples"} 0 {test="three samples"} 0 {test="uneven samples"} 0 eval instant at 1m quantile_over_time(0.5, data[2m]) + expect no_info + expect no_warn {test="two samples"} 0.5 {test="three samples"} 1 {test="uneven samples"} 1 eval instant at 1m quantile_over_time(0.75, data[2m]) + expect no_info + expect no_warn {test="two samples"} 0.75 {test="three samples"} 1.5 {test="uneven samples"} 2.5 eval instant at 1m quantile_over_time(0.8, data[2m]) + expect no_info + expect no_warn {test="two samples"} 0.8 {test="three samples"} 1.6 {test="uneven samples"} 2.8 eval instant at 1m quantile_over_time(1, data[2m]) + expect no_info + expect no_warn {test="two samples"} 1 {test="three samples"} 2 {test="uneven samples"} 4 -eval_warn instant at 1m quantile_over_time(-1, data[2m]) +eval instant at 1m quantile_over_time(-1, data[2m]) + expect no_info + expect warn {test="two samples"} -Inf {test="three samples"} -Inf {test="uneven samples"} -Inf -eval_warn instant at 1m quantile_over_time(2, data[2m]) +eval instant at 1m quantile_over_time(2, data[2m]) + expect no_info + expect warn {test="two samples"} +Inf {test="three samples"} +Inf {test="uneven samples"} +Inf -eval_warn instant at 1m (quantile_over_time(2, (data[2m]))) +eval instant at 1m (quantile_over_time(2, (data[2m]))) + expect no_info + expect warn {test="two samples"} +Inf {test="three samples"} +Inf {test="uneven samples"} +Inf eval instant at 1m quantile_over_time(0.5, data_histogram{test="only histogram samples"}[2m]) + expect no_info + expect no_warn #empty -eval_info instant at 1m quantile_over_time(0.5, data_histogram{test="mix samples"}[2m]) +eval instant at 1m quantile_over_time(0.5, data_histogram{test="mix samples"}[2m]) + expect info + expect no_warn {test="mix samples"} 1 clear @@ -1417,7 +1507,8 @@ load 5m 
testmetric1{src="a",dst="b"} 0 testmetric2{src="a",dst="b"} 1 -eval_fail instant at 0m changes({__name__=~'testmetric1|testmetric2'}[5m]) +eval instant at 0m changes({__name__=~'testmetric1|testmetric2'}[5m]) + expect fail clear @@ -1432,6 +1523,7 @@ load 10s data_histogram{type="mix_samples"} 0 1 {{schema:0 sum:1 count:2}} {{schema:0 sum:2 count:3}} eval instant at 1m min_over_time(data[2m]) + expect no_info {type="numbers"} 0 {type="some_nan"} 0 {type="some_nan2"} 1 @@ -1439,12 +1531,15 @@ eval instant at 1m min_over_time(data[2m]) {type="only_nan"} NaN eval instant at 1m min_over_time(data_histogram{type="only_histogram"}[2m]) + expect no_info #empty -eval_info instant at 1m min_over_time(data_histogram{type="mix_samples"}[2m]) +eval instant at 1m min_over_time(data_histogram{type="mix_samples"}[2m]) + expect info {type="mix_samples"} 0 eval instant at 1m max_over_time(data[2m]) + expect no_info {type="numbers"} 3 {type="some_nan"} 2 {type="some_nan2"} 2 @@ -1452,12 +1547,15 @@ eval instant at 1m max_over_time(data[2m]) {type="only_nan"} NaN eval instant at 1m max_over_time(data_histogram{type="only_histogram"}[2m]) + expect no_info #empty -eval_info instant at 1m max_over_time(data_histogram{type="mix_samples"}[2m]) +eval instant at 1m max_over_time(data_histogram{type="mix_samples"}[2m]) + expect info {type="mix_samples"} 1 eval instant at 1m last_over_time({__name__=~"data(_histogram)?"}[2m]) + expect no_info data{type="numbers"} 3 data{type="some_nan"} NaN data{type="some_nan2"} 1 @@ -1467,6 +1565,7 @@ eval instant at 1m last_over_time({__name__=~"data(_histogram)?"}[2m]) data_histogram{type="mix_samples"} {{schema:0 sum:2 count:3}} eval instant at 1m count_over_time({__name__=~"data(_histogram)?"}[2m]) + expect no_info {type="numbers"} 3 {type="some_nan"} 3 {type="some_nan2"} 3 diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test 
index bf13864277d..84a467a3145 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test @@ -70,74 +70,91 @@ load_with_nhcb 5m # Test histogram_count. eval instant at 50m histogram_count(testhistogram3) + expect no_warn {start="positive"} 110 {start="negative"} 20 # Classic way of accessing the count still works. eval instant at 50m testhistogram3_count + expect no_warn testhistogram3_count{start="positive"} 110 testhistogram3_count{start="negative"} 20 # Test histogram_sum. eval instant at 50m histogram_sum(testhistogram3) + expect no_warn {start="positive"} 330 {start="negative"} 80 # Classic way of accessing the sum still works. eval instant at 50m testhistogram3_sum + expect no_warn testhistogram3_sum{start="positive"} 330 testhistogram3_sum{start="negative"} 80 # Test histogram_avg. This has no classic equivalent. eval instant at 50m histogram_avg(testhistogram3) + expect no_warn {start="positive"} 3 {start="negative"} 4 # Test histogram_stddev. This has no classic equivalent. eval instant at 50m histogram_stddev(testhistogram3) + expect no_warn {start="positive"} 2.7435461458749795 {start="negative"} 4.187667907081458 # Test histogram_stdvar. This has no classic equivalent. eval instant at 50m histogram_stdvar(testhistogram3) + expect no_warn {start="positive"} 7.527045454545455 {start="negative"} 17.5365625 # Test histogram_fraction. 
# eval instant at 50m histogram_fraction(0, 4, testhistogram2) + expect no_warn {} 0.6666666666666666 eval instant at 50m histogram_fraction(0, 4, testhistogram2_bucket) + expect no_warn {} 0.6666666666666666 eval instant at 50m histogram_fraction(0, 6, testhistogram2) + expect no_warn {} 1 eval instant at 50m histogram_fraction(0, 6, testhistogram2_bucket) + expect no_warn {} 1 eval instant at 50m histogram_fraction(0, 3.5, testhistogram2) + expect no_warn {} 0.5833333333333334 eval instant at 50m histogram_fraction(0, 3.5, testhistogram2_bucket) + expect no_warn {} 0.5833333333333334 eval instant at 50m histogram_fraction(0, 0.2, testhistogram3) + expect no_warn {start="positive"} 0.6363636363636364 {start="negative"} 0 eval instant at 50m histogram_fraction(0, 0.2, testhistogram3_bucket) + expect no_warn {start="positive"} 0.6363636363636364 {start="negative"} 0 eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[10m])) + expect no_warn {start="positive"} 0.6363636363636364 {start="negative"} 0 eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3_bucket[10m])) + expect no_warn {start="positive"} 0.6363636363636364 {start="negative"} 0 @@ -145,80 +162,98 @@ eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3_bucket[10m])) # it exists) and divide by the count to get the same result. eval instant at 50m testhistogram3_bucket{le=".2"} / ignoring(le) testhistogram3_count + expect no_warn {start="positive"} 0.6363636363636364 eval instant at 50m rate(testhistogram3_bucket{le=".2"}[10m]) / ignoring(le) rate(testhistogram3_count[10m]) + expect no_warn {start="positive"} 0.6363636363636364 # Test histogram_quantile, native and classic. 
eval instant at 50m histogram_quantile(0, testhistogram3) + expect no_warn {start="positive"} 0 {start="negative"} -0.25 eval instant at 50m histogram_quantile(0, testhistogram3_bucket) + expect no_warn {start="positive"} 0 {start="negative"} -0.25 eval instant at 50m histogram_quantile(0.25, testhistogram3) + expect no_warn {start="positive"} 0.055 {start="negative"} -0.225 eval instant at 50m histogram_quantile(0.25, testhistogram3_bucket) + expect no_warn {start="positive"} 0.055 {start="negative"} -0.225 eval instant at 50m histogram_quantile(0.5, testhistogram3) + expect no_warn {start="positive"} 0.125 {start="negative"} -0.2 eval instant at 50m histogram_quantile(0.5, testhistogram3_bucket) + expect no_warn {start="positive"} 0.125 {start="negative"} -0.2 eval instant at 50m histogram_quantile(0.75, testhistogram3) + expect no_warn {start="positive"} 0.45 {start="negative"} -0.15 eval instant at 50m histogram_quantile(0.75, testhistogram3_bucket) + expect no_warn {start="positive"} 0.45 {start="negative"} -0.15 eval instant at 50m histogram_quantile(1, testhistogram3) + expect no_warn {start="positive"} 1 {start="negative"} -0.1 eval instant at 50m histogram_quantile(1, testhistogram3_bucket) + expect no_warn {start="positive"} 1 {start="negative"} -0.1 # Quantile too low. -eval_warn instant at 50m histogram_quantile(-0.1, testhistogram) +eval instant at 50m histogram_quantile(-0.1, testhistogram) + expect warn {start="positive"} -Inf {start="negative"} -Inf -eval_warn instant at 50m histogram_quantile(-0.1, testhistogram_bucket) +eval instant at 50m histogram_quantile(-0.1, testhistogram_bucket) + expect warn {start="positive"} -Inf {start="negative"} -Inf # Quantile too high. 
-eval_warn instant at 50m histogram_quantile(1.01, testhistogram) +eval instant at 50m histogram_quantile(1.01, testhistogram) + expect warn {start="positive"} +Inf {start="negative"} +Inf -eval_warn instant at 50m histogram_quantile(1.01, testhistogram_bucket) +eval instant at 50m histogram_quantile(1.01, testhistogram_bucket) + expect warn {start="positive"} +Inf {start="negative"} +Inf # Quantile invalid. -eval_warn instant at 50m histogram_quantile(NaN, testhistogram) +eval instant at 50m histogram_quantile(NaN, testhistogram) + expect warn {start="positive"} NaN {start="negative"} NaN -eval_warn instant at 50m histogram_quantile(NaN, testhistogram_bucket) +eval instant at 50m histogram_quantile(NaN, testhistogram_bucket) + expect warn {start="positive"} NaN {start="negative"} NaN @@ -228,196 +263,244 @@ eval instant at 50m histogram_quantile(NaN, non_existent) # Quantile value in lowest bucket. eval instant at 50m histogram_quantile(0, testhistogram) + expect no_warn {start="positive"} 0 {start="negative"} -0.2 eval instant at 50m histogram_quantile(0, testhistogram_bucket) + expect no_warn {start="positive"} 0 {start="negative"} -0.2 # Quantile value in highest bucket. eval instant at 50m histogram_quantile(1, testhistogram) + expect no_warn {start="positive"} 1 {start="negative"} 0.3 eval instant at 50m histogram_quantile(1, testhistogram_bucket) + expect no_warn {start="positive"} 1 {start="negative"} 0.3 # Finally some useful quantiles. 
eval instant at 50m histogram_quantile(0.2, testhistogram) + expect no_warn {start="positive"} 0.048 {start="negative"} -0.2 eval instant at 50m histogram_quantile(0.2, testhistogram_bucket) + expect no_warn {start="positive"} 0.048 {start="negative"} -0.2 eval instant at 50m histogram_quantile(0.5, testhistogram) + expect no_warn {start="positive"} 0.15 {start="negative"} -0.15 eval instant at 50m histogram_quantile(0.5, testhistogram_bucket) + expect no_warn {start="positive"} 0.15 {start="negative"} -0.15 eval instant at 50m histogram_quantile(0.8, testhistogram) + expect no_warn {start="positive"} 0.72 {start="negative"} 0.3 eval instant at 50m histogram_quantile(0.8, testhistogram_bucket) + expect no_warn {start="positive"} 0.72 {start="negative"} 0.3 # More realistic with rates. eval instant at 50m histogram_quantile(0.2, rate(testhistogram[10m])) + expect no_warn {start="positive"} 0.048 {start="negative"} -0.2 eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[10m])) + expect no_warn {start="positive"} 0.048 {start="negative"} -0.2 eval instant at 50m histogram_quantile(0.5, rate(testhistogram[10m])) + expect no_warn {start="positive"} 0.15 {start="negative"} -0.15 eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[10m])) + expect no_warn {start="positive"} 0.15 {start="negative"} -0.15 eval instant at 50m histogram_quantile(0.8, rate(testhistogram[10m])) + expect no_warn {start="positive"} 0.72 {start="negative"} 0.3 eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[10m])) + expect no_warn {start="positive"} 0.72 {start="negative"} 0.3 # Want results exactly in the middle of the bucket. 
eval instant at 7m histogram_quantile(1./6., testhistogram2) + expect no_warn {} 1 eval instant at 7m histogram_quantile(1./6., testhistogram2_bucket) + expect no_warn {} 1 eval instant at 7m histogram_quantile(0.5, testhistogram2) + expect no_warn {} 3 eval instant at 7m histogram_quantile(0.5, testhistogram2_bucket) + expect no_warn {} 3 eval instant at 7m histogram_quantile(5./6., testhistogram2) + expect no_warn {} 5 eval instant at 7m histogram_quantile(5./6., testhistogram2_bucket) + expect no_warn {} 5 eval instant at 47m histogram_quantile(1./6., rate(testhistogram2[15m])) + expect no_warn {} 1 eval instant at 47m histogram_quantile(1./6., rate(testhistogram2_bucket[15m])) + expect no_warn {} 1 eval instant at 47m histogram_quantile(0.5, rate(testhistogram2[15m])) + expect no_warn {} 3 eval instant at 47m histogram_quantile(0.5, rate(testhistogram2_bucket[15m])) + expect no_warn {} 3 eval instant at 47m histogram_quantile(5./6., rate(testhistogram2[15m])) + expect no_warn {} 5 eval instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m])) + expect no_warn {} 5 # Aggregated histogram: Everything in one. Note how native histograms # don't require aggregation by le. eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m]))) + expect no_warn {} 0.075 eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le)) + expect no_warn {} 0.075 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m]))) + expect no_warn {} 0.1277777777777778 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le)) + expect no_warn {} 0.1277777777777778 # Aggregated histogram: Everything in one. Now with avg, which does not change anything. 
eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds[10m]))) + expect no_warn {} 0.075 eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[10m])) by (le)) + expect no_warn {} 0.075 eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds[10m]))) + expect no_warn {} 0.12777777777777778 eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[10m])) by (le)) + expect no_warn {} 0.12777777777777778 # Aggregated histogram: By instance. eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (instance)) + expect no_warn {instance="ins1"} 0.075 {instance="ins2"} 0.075 eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance)) + expect no_warn {instance="ins1"} 0.075 {instance="ins2"} 0.075 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (instance)) + expect no_warn {instance="ins1"} 0.1333333333 {instance="ins2"} 0.125 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance)) + expect no_warn {instance="ins1"} 0.1333333333 {instance="ins2"} 0.125 # Aggregated histogram: By job. eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job)) + expect no_warn {job="job1"} 0.1 {job="job2"} 0.0642857142857143 eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job)) + expect no_warn {job="job1"} 0.1 {job="job2"} 0.0642857142857143 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job)) + expect no_warn {job="job1"} 0.14 {job="job2"} 0.1125 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job)) + expect no_warn {job="job1"} 0.14 {job="job2"} 0.1125 # Aggregated histogram: By job and instance. 
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job, instance)) + expect no_warn {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance)) + expect no_warn {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job, instance)) + expect no_warn {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.1333333333333333 {instance="ins1", job="job2"} 0.1 {instance="ins2", job="job2"} 0.1166666666666667 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance)) + expect no_warn {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.1333333333333333 {instance="ins1", job="job2"} 0.1 @@ -425,24 +508,28 @@ eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bu # The unaggregated histogram for comparison. Same result as the previous one. 
eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds[10m])) + expect no_warn {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[10m])) + expect no_warn {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds[10m])) + expect no_warn {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.13333333333333333 {instance="ins1", job="job2"} 0.1 {instance="ins2", job="job2"} 0.11666666666666667 eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[10m])) + expect no_warn {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.13333333333333333 {instance="ins1", job="job2"} 0.1 @@ -450,25 +537,32 @@ eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket # All NHCBs summed into one. eval instant at 50m sum(request_duration_seconds) + expect no_warn {} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}} eval instant at 50m sum(request_duration_seconds{job="job1",instance="ins1"} + ignoring(job,instance) request_duration_seconds{job="job1",instance="ins2"} + ignoring(job,instance) request_duration_seconds{job="job2",instance="ins1"} + ignoring(job,instance) request_duration_seconds{job="job2",instance="ins2"}) + expect no_warn {} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}} eval instant at 50m avg(request_duration_seconds) + expect no_warn {} {{schema:-53 count:62.5 custom_values:[0.1 0.2] buckets:[25 22.5 15]}} # To verify the result above, calculate from classic histogram as well. 
eval instant at 50m avg (request_duration_seconds_bucket{le="0.1"}) + expect no_warn {} 25 eval instant at 50m avg (request_duration_seconds_bucket{le="0.2"}) - avg (request_duration_seconds_bucket{le="0.1"}) + expect no_warn {} 22.5 eval instant at 50m avg (request_duration_seconds_bucket{le="+Inf"}) - avg (request_duration_seconds_bucket{le="0.2"}) + expect no_warn {} 15 eval instant at 50m count(request_duration_seconds) + expect no_warn {} 4 # A histogram with nonmonotonic bucket counts. This may happen when recording @@ -484,13 +578,16 @@ load 5m nonmonotonic_bucket{le="+Inf"} 0+8x10 # Nonmonotonic buckets, triggering an info annotation. -eval_info instant at 50m histogram_quantile(0.01, nonmonotonic_bucket) +eval instant at 50m histogram_quantile(0.01, nonmonotonic_bucket) + expect info {} 0.0045 -eval_info instant at 50m histogram_quantile(0.5, nonmonotonic_bucket) +eval instant at 50m histogram_quantile(0.5, nonmonotonic_bucket) + expect info {} 8.5 -eval_info instant at 50m histogram_quantile(0.99, nonmonotonic_bucket) +eval instant at 50m histogram_quantile(0.99, nonmonotonic_bucket) + expect info {} 979.75 # Buckets with different representations of the same upper bound. @@ -525,9 +622,11 @@ load_with_nhcb 5m request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10 request_duration_seconds2_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10 -eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket"}) +eval instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket"}) + expect fail -eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*"}) +eval instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*"}) + expect fail # Histogram with constant buckets. 
load_with_nhcb 1m diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/limit.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/limit.test index 484760cc85c..3af8d3b364e 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/limit.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/limit.test @@ -15,10 +15,14 @@ load 5m bar 0 1 0 -1 0 1 0 -1 0 1 0 eval instant at 50m count(limitk by (group) (0, http_requests)) -# empty + expect no_info + expect no_warn + # empty eval instant at 50m count(limitk by (group) (-1, http_requests)) -# empty + expect no_info + expect no_warn + # empty # Exercise k==1 special case (as sample is added before the main series loop). eval instant at 50m count(limitk by (group) (1, http_requests) and http_requests) diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test index 9af45a73240..48cdb9ba4e9 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test @@ -73,7 +73,8 @@ eval instant at 10m sum by (__name__, env) (metric_total{env="1"}) # Aggregation operators by __name__ lead to duplicate labelset errors (aggregation is partitioned by not yet removed __name__ label). # This is an accidental side effect of delayed __name__ label dropping -eval_fail instant at 10m sum by (__name__) (rate({env="1"}[10m])) +eval instant at 10m sum by (__name__) (rate({env="1"}[10m])) + expect fail # Aggregation operators aggregate metrics with same labelset and to-be-dropped names. 
# This is an accidental side effect of delayed __name__ label dropping diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test index c4ffcba034e..e38e003b3f4 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test @@ -398,35 +398,44 @@ clear load 10m histogram_quantile_1 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1 -eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_1) +eval instant at 10m histogram_quantile(1.001, histogram_quantile_1) + expect warn {} Inf eval instant at 10m histogram_quantile(1, histogram_quantile_1) + expect no_warn {} 16 # The following quantiles are within a bucket. Exponential # interpolation is applied (rather than linear, as it is done for # classic histograms), leading to slightly different quantile values. eval instant at 10m histogram_quantile(0.99, histogram_quantile_1) + expect no_warn {} 15.67072476139083 eval instant at 10m histogram_quantile(0.9, histogram_quantile_1) + expect no_warn {} 12.99603834169977 eval instant at 10m histogram_quantile(0.6, histogram_quantile_1) + expect no_warn {} 4.594793419988138 eval instant at 10m histogram_quantile(0.5, histogram_quantile_1) + expect no_warn {} 1.5874010519681994 # Linear interpolation within the zero bucket after all. 
eval instant at 10m histogram_quantile(0.1, histogram_quantile_1) + expect no_warn {} 0.0006 eval instant at 10m histogram_quantile(0, histogram_quantile_1) + expect no_warn {} 0 -eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_1) +eval instant at 10m histogram_quantile(-1, histogram_quantile_1) + expect warn {} -Inf clear @@ -435,31 +444,39 @@ clear load 10m histogram_quantile_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1 -eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_2) +eval instant at 10m histogram_quantile(1.001, histogram_quantile_2) + expect warn {} Inf eval instant at 10m histogram_quantile(1, histogram_quantile_2) + expect no_warn {} 0 # Again, the quantile values here are slightly different from what # they would be with linear interpolation. Note that quantiles # ending up in the zero bucket are linearly interpolated after all. eval instant at 10m histogram_quantile(0.99, histogram_quantile_2) + expect no_warn {} -0.00006 eval instant at 10m histogram_quantile(0.9, histogram_quantile_2) + expect no_warn {} -0.0006 eval instant at 10m histogram_quantile(0.5, histogram_quantile_2) + expect no_warn {} -1.5874010519681996 eval instant at 10m histogram_quantile(0.1, histogram_quantile_2) + expect no_warn {} -12.996038341699768 eval instant at 10m histogram_quantile(0, histogram_quantile_2) + expect no_warn {} -16 -eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_2) +eval instant at 10m histogram_quantile(-1, histogram_quantile_2) + expect warn {} -Inf clear @@ -470,46 +487,59 @@ clear load 10m histogram_quantile_3 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 -eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_3) +eval instant at 10m histogram_quantile(1.001, histogram_quantile_3) + expect warn {} Inf eval instant at 10m histogram_quantile(1, histogram_quantile_3) + expect no_warn {} 16 
eval instant at 10m histogram_quantile(0.99, histogram_quantile_3) + expect no_warn {} 15.34822590920423 eval instant at 10m histogram_quantile(0.9, histogram_quantile_3) + expect no_warn {} 10.556063286183155 eval instant at 10m histogram_quantile(0.7, histogram_quantile_3) + expect no_warn {} 1.2030250360821164 # Linear interpolation in the zero bucket, symmetrically centered around # the zero point. eval instant at 10m histogram_quantile(0.55, histogram_quantile_3) + expect no_warn {} 0.0006 eval instant at 10m histogram_quantile(0.5, histogram_quantile_3) + expect no_warn {} 0 eval instant at 10m histogram_quantile(0.45, histogram_quantile_3) + expect no_warn {} -0.0006 # Finally negative buckets with mirrored exponential interpolation. eval instant at 10m histogram_quantile(0.3, histogram_quantile_3) + expect no_warn {} -1.2030250360821169 eval instant at 10m histogram_quantile(0.1, histogram_quantile_3) + expect no_warn {} -10.556063286183155 eval instant at 10m histogram_quantile(0.01, histogram_quantile_3) + expect no_warn {} -15.34822590920423 eval instant at 10m histogram_quantile(0, histogram_quantile_3) + expect no_warn {} -16 -eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_3) +eval instant at 10m histogram_quantile(-1, histogram_quantile_3) + expect warn {} -Inf clear @@ -909,63 +939,84 @@ load 10m float_series_0 0+0x1 eval instant at 10m histogram_mul_div*3 + expect no_info {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}} eval instant at 10m histogram_mul_div*-1 + expect no_info {} {{schema:0 count:-30 sum:-33 z_bucket:-3 z_bucket_w:0.001 buckets:[-3 -3 -3] n_buckets:[-6 -6 -6]}} eval instant at 10m -histogram_mul_div + expect no_info {} {{schema:0 count:-30 sum:-33 z_bucket:-3 z_bucket_w:0.001 buckets:[-3 -3 -3] n_buckets:[-6 -6 -6]}} eval instant at 10m histogram_mul_div*-3 + expect no_info {} {{schema:0 count:-90 sum:-99 z_bucket:-9 z_bucket_w:0.001 buckets:[-9 -9 -9] 
n_buckets:[-18 -18 -18]}} eval instant at 10m 3*histogram_mul_div + expect no_info {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}} eval instant at 10m histogram_mul_div*float_series_3 + expect no_info {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}} eval instant at 10m float_series_3*histogram_mul_div + expect no_info {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}} eval instant at 10m histogram_mul_div/3 + expect no_info {} {{schema:0 count:10 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[2 2 2]}} eval instant at 10m histogram_mul_div/-3 + expect no_info {} {{schema:0 count:-10 sum:-11 z_bucket:-1 z_bucket_w:0.001 buckets:[-1 -1 -1] n_buckets:[-2 -2 -2]}} eval instant at 10m histogram_mul_div/float_series_3 + expect no_info {} {{schema:0 count:10 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[2 2 2]}} eval instant at 10m histogram_mul_div*0 + expect no_info {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}} eval instant at 10m 0*histogram_mul_div + expect no_info {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}} eval instant at 10m histogram_mul_div*float_series_0 + expect no_info {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}} eval instant at 10m float_series_0*histogram_mul_div + expect no_info {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}} eval instant at 10m histogram_mul_div/0 + expect no_info {} {{schema:0 count:Inf sum:Inf z_bucket_w:0.001 z_bucket:Inf}} eval instant at 10m histogram_mul_div/float_series_0 + expect no_info {} {{schema:0 count:Inf sum:Inf z_bucket_w:0.001 z_bucket:Inf}} eval instant at 10m histogram_mul_div*0/0 + expect no_info {} {{schema:0 count:NaN sum:NaN z_bucket_w:0.001 z_bucket:NaN}} 
-eval_info instant at 10m histogram_mul_div*histogram_mul_div +eval instant at 10m histogram_mul_div*histogram_mul_div + expect info -eval_info instant at 10m histogram_mul_div/histogram_mul_div +eval instant at 10m histogram_mul_div/histogram_mul_div + expect info -eval_info instant at 10m float_series_3/histogram_mul_div +eval instant at 10m float_series_3/histogram_mul_div + expect info -eval_info instant at 10m 0/histogram_mul_div +eval instant at 10m 0/histogram_mul_div + expect info clear @@ -976,13 +1027,17 @@ load 10m histogram_sample {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 float_sample 0x1 -eval_info instant at 10m float_sample+histogram_sample +eval instant at 10m float_sample+histogram_sample + expect info -eval_info instant at 10m histogram_sample+float_sample +eval instant at 10m histogram_sample+float_sample + expect info -eval_info instant at 10m float_sample-histogram_sample +eval instant at 10m float_sample-histogram_sample + expect info -eval_info instant at 10m histogram_sample-float_sample +eval instant at 10m histogram_sample-float_sample + expect info # Counter reset only noticeable in a single bucket. 
load 5m @@ -1020,11 +1075,13 @@ load 30s some_metric {{schema:0 sum:1 count:1 buckets:[1] counter_reset_hint:gauge}} {{schema:0 sum:2 count:2 buckets:[2] counter_reset_hint:gauge}} {{schema:0 sum:3 count:3 buckets:[3] counter_reset_hint:gauge}} # Test the case where we only have two points for rate -eval_warn instant at 30s rate(some_metric[1m]) +eval instant at 30s rate(some_metric[1m]) + expect warn {} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}} # Test the case where we have more than two points for rate -eval_warn instant at 1m rate(some_metric[1m30s]) +eval instant at 1m rate(some_metric[1m30s]) + expect warn {} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}} clear @@ -1034,18 +1091,24 @@ load 30s some_metric {{schema:0 sum:1 count:1 buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} # Start and end with exponential, with custom in the middle. -eval_warn instant at 1m rate(some_metric[1m30s]) +eval instant at 1m rate(some_metric[1m30s]) + expect warn # Should produce no results. # Start and end with custom, with exponential in the middle. -eval_warn instant at 1m30s rate(some_metric[1m30s]) +eval instant at 1m30s rate(some_metric[1m30s]) + expect warn # Should produce no results. -# Start with custom, end with exponential. Return the exponential histogram divided by 30. +# Start with custom, end with exponential. Return the exponential histogram divided by 48. +# (The 1st sample is the NHCB with count:1. It is mostly ignored with the exception of the +# count, which means the rate calculation extrapolates until the count hits 0.) 
eval instant at 1m rate(some_metric[1m]) - {} {{schema:0 sum:0.16666666666666666 count:0.13333333333333333 buckets:[0.03333333333333333 0.06666666666666667 0.03333333333333333]}} + {} {{count:0.08333333333333333 sum:0.10416666666666666 counter_reset_hint:gauge buckets:[0.020833333333333332 0.041666666666666664 0.020833333333333332]}} # Start with exponential, end with custom. Return the custom buckets histogram divided by 30. +# (With the 2nd sample having a count of 1, the extrapolation to zero lands exactly at the +# left boundary of the range, so no extrapolation limitation needed.) eval instant at 30s rate(some_metric[1m]) {} {{schema:-53 sum:0.03333333333333333 count:0.03333333333333333 custom_values:[5 10] buckets:[0.03333333333333333]}} @@ -1107,10 +1170,12 @@ load 6m # T=0: only exponential # T=6: only custom # T=12: mixed, should be ignored and emit a warning -eval_warn range from 0 to 12m step 6m sum(metric) +eval range from 0 to 12m step 6m sum(metric) + expect warn {} {{sum:7 count:5 buckets:[2 3 2]}} {{schema:-53 sum:16 count:3 custom_values:[5 10] buckets:[1 2]}} _ -eval_warn range from 0 to 12m step 6m avg(metric) +eval range from 0 to 12m step 6m avg(metric) + expect warn {} {{sum:3.5 count:2.5 buckets:[1 1.5 1]}} {{schema:-53 sum:8 count:1.5 custom_values:[5 10] buckets:[0.5 1]}} _ clear @@ -1124,10 +1189,12 @@ load 6m # T=0: incompatible, should be ignored and emit a warning # T=6: compatible # T=12: incompatible followed by compatible, should be ignored and emit a warning -eval_warn range from 0 to 12m step 6m sum(metric) +eval range from 0 to 12m step 6m sum(metric) + expect warn {} _ {{schema:-53 sum:2 count:2 custom_values:[5 10] buckets:[2]}} _ -eval_warn range from 0 to 12m step 6m avg(metric) +eval range from 0 to 12m step 6m avg(metric) + expect warn {} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} _ # Test incompatible schemas with additional aggregation operators @@ -1159,9 +1226,11 @@ eval range from 0 to 12m step 6m 
metric{series="1"} or ignoring(series) metric{s metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ _ # Test incompatible schemas with arithmetic binary operators -eval_warn range from 0 to 12m step 6m metric{series="2"} + ignoring (series) metric{series="3"} +eval range from 0 to 12m step 6m metric{series="2"} + ignoring (series) metric{series="3"} + expect warn -eval_warn range from 0 to 12m step 6m metric{series="2"} - ignoring (series) metric{series="3"} +eval range from 0 to 12m step 6m metric{series="2"} - ignoring (series) metric{series="3"} + expect warn clear @@ -1171,12 +1240,15 @@ load 6m metric2 {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} eval range from 0 to 6m step 6m metric1 == metric2 -metric1{} _ {{schema:-53 count:1 sum:1 custom_values:[5 10] buckets:[1]}} + expect no_info + metric1{} _ {{schema:-53 count:1 sum:1 custom_values:[5 10] buckets:[1]}} eval range from 0 to 6m step 6m metric1 != metric2 -metric1{} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ + expect no_info + metric1{} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ -eval_info range from 0 to 6m step 6m metric2 > metric2 +eval range from 0 to 6m step 6m metric2 > metric2 + expect info clear @@ -1186,62 +1258,82 @@ load 6m # If evaluating at 12m, the first two NHCBs have the same custom values # while the 3rd one has different ones. 
-eval_warn instant at 12m sum_over_time(nhcb_metric[13m]) +eval instant at 12m sum_over_time(nhcb_metric[13m]) + expect warn -eval_warn instant at 12m avg_over_time(nhcb_metric[13m]) +eval instant at 12m avg_over_time(nhcb_metric[13m]) + expect warn eval instant at 12m last_over_time(nhcb_metric[13m]) -nhcb_metric{} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + expect no_warn + nhcb_metric{} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} eval instant at 12m count_over_time(nhcb_metric[13m]) -{} 3 + expect no_warn + {} 3 eval instant at 12m present_over_time(nhcb_metric[13m]) -{} 1 + expect no_warn + {} 1 eval instant at 12m changes(nhcb_metric[13m]) -{} 1 + expect no_warn + {} 1 -eval_warn instant at 12m delta(nhcb_metric[13m]) +eval instant at 12m delta(nhcb_metric[13m]) + expect warn -eval_warn instant at 12m increase(nhcb_metric[13m]) +eval instant at 12m increase(nhcb_metric[13m]) + expect warn -eval_warn instant at 12m rate(nhcb_metric[13m]) +eval instant at 12m rate(nhcb_metric[13m]) + expect warn eval instant at 12m resets(nhcb_metric[13m]) -{} 1 + expect no_warn + {} 1 # Now doing the same again, but at 18m, where the first NHCB has # different custom_values compared to the other two. This now # works with no warning for increase() and rate(). No change # otherwise. 
-eval_warn instant at 18m sum_over_time(nhcb_metric[13m]) +eval instant at 18m sum_over_time(nhcb_metric[13m]) + expect warn -eval_warn instant at 18m avg_over_time(nhcb_metric[13m]) +eval instant at 18m avg_over_time(nhcb_metric[13m]) + expect warn eval instant at 18m last_over_time(nhcb_metric[13m]) -nhcb_metric{} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + expect no_warn + nhcb_metric{} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} eval instant at 18m count_over_time(nhcb_metric[13m]) -{} 3 + expect no_warn + {} 3 eval instant at 18m present_over_time(nhcb_metric[13m]) -{} 1 + expect no_warn + {} 1 eval instant at 18m changes(nhcb_metric[13m]) -{} 1 + expect no_warn + {} 1 -eval_warn instant at 18m delta(nhcb_metric[13m]) +eval instant at 18m delta(nhcb_metric[13m]) + expect warn eval instant at 18m increase(nhcb_metric[13m]) -{} {{schema:-53 count:1.0833333333333333 sum:1.0833333333333333 custom_values:[5 10] buckets:[1.0833333333333333]}} + expect no_warn + {} {{schema:-53 count:1.0833333333333333 sum:1.0833333333333333 custom_values:[5 10] buckets:[1.0833333333333333]}} eval instant at 18m rate(nhcb_metric[13m]) -{} {{schema:-53 count:0.0013888888888888887 sum:0.0013888888888888887 custom_values:[5 10] buckets:[0.0013888888888888887]}} + expect no_warn + {} {{schema:-53 count:0.0013888888888888887 sum:0.0013888888888888887 custom_values:[5 10] buckets:[0.0013888888888888887]}} eval instant at 18m resets(nhcb_metric[13m]) -{} 1 + expect no_warn + {} 1 clear @@ -1259,7 +1351,8 @@ load 1m metric{group="incompatible-custom-histograms", series="1"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} metric{group="incompatible-custom-histograms", series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} -eval_warn instant at 0 sum by (group) (metric) +eval instant at 0 sum by (group) (metric) + expect warn {group="just-floats"} 5 {group="just-exponential-histograms"} {{sum:5 count:7 buckets:[2 3 2]}} 
{group="just-custom-histograms"} {{schema:-53 sum:4 count:5 custom_values:[2] buckets:[8]}} @@ -1275,17 +1368,22 @@ load 10m histogram_sum_float{idx="0"} 42.0x1 eval instant at 10m sum(histogram_sum) + expect no_warn {} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}} -eval_warn instant at 10m sum({idx="0"}) +eval instant at 10m sum({idx="0"}) + expect warn eval instant at 10m sum(histogram_sum{idx="0"} + ignoring(idx) histogram_sum{idx="1"} + ignoring(idx) histogram_sum{idx="2"} + ignoring(idx) histogram_sum{idx="3"}) + expect no_warn {} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}} eval instant at 10m count(histogram_sum) + expect no_warn {} 4 eval instant at 10m avg(histogram_sum) + expect no_warn {} {{schema:0 count:26.75 sum:1172.8 z_bucket:3.5 z_bucket_w:0.001 buckets:[0.75 2 0.5 1.25 0.75 0.5 0.5] n_buckets:[0.5 1.5 2 1 3.75 2.25 0 0 0 2.5 2.5 1]}} clear @@ -1376,21 +1474,25 @@ eval instant at 1m histogram_fraction(-Inf, +Inf, histogram_nan) clear -# Tests to demonstrate how an extrapolation below zero is prevented for a float counter, but not for native histograms. -# I.e. the float counter that behaves the same as the histogram count might yield a different result after `increase`. +# Tests to demonstrate how an extrapolation below zero is prevented for both float counters and native counter histograms. +# Note that the float counter behaves the same as the histogram count after `increase`. load 1m metric{type="histogram"} {{schema:0 count:15 sum:25 buckets:[5 10]}} {{schema:0 count:2490 sum:75 buckets:[15 2475]}}x55 metric{type="counter"} 15 2490x55 # End of range coincides with sample. Zero point of count is reached within the range. +# Note that the 2nd bucket has an exaggerated increase of 2479.939393939394 (although +# it has a value of only 2475 at the end of the range). 
eval instant at 55m increase(metric[90m]) - {type="histogram"} {{count:2497.5 sum:50.45454545454545 counter_reset_hint:gauge buckets:[10.09090909090909 2487.409090909091]}} + {type="histogram"} {{count:2490 sum:50.303030303030305 counter_reset_hint:gauge buckets:[10.06060606060606 2479.939393939394]}} {type="counter"} 2490 # End of range does not coincide with sample. Zero point of count is reached within the range. +# The 2nd bucket again has an exaggerated increase, but it is less obvious because of the +# right-side extrapolation. eval instant at 54m30s increase(metric[90m]) - {type="histogram"} {{count:2520.8333333333335 sum:50.92592592592593 counter_reset_hint:gauge buckets:[10.185185185185187 2510.6481481481483]}} + {type="histogram"} {{count:2512.9166666666665 sum:50.76599326599326 counter_reset_hint:gauge buckets:[10.153198653198652 2502.7634680134674]}} {type="counter"} 2512.9166666666665 # End of range coincides with sample. Zero point of count is reached outside of (i.e. before) the range. @@ -1408,3 +1510,16 @@ eval instant at 55m increase(metric[55m15s]) eval instant at 54m30s increase(metric[54m45s]) {type="histogram"} {{count:2509.375 sum:50.69444444444444 counter_reset_hint:gauge buckets:[10.13888888888889 2499.236111111111]}} {type="counter"} 2509.375 + +# Try the same, but now extract just the histogram count via `histogram_count`. 
+eval instant at 55m histogram_count(increase(metric[90m])) + {type="histogram"} 2490 + +eval instant at 54m30s histogram_count(increase(metric[90m])) + {type="histogram"} 2512.9166666666665 + +eval instant at 55m histogram_count(increase(metric[55m15s])) + {type="histogram"} 2486.25 + +eval instant at 54m30s histogram_count(increase(metric[54m45s])) + {type="histogram"} 2509.375 diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/operators.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/operators.test index 667989ca77d..0e779f192cb 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/operators.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/operators.test @@ -289,24 +289,32 @@ eval instant at 50m http_requests_total{job="api-server", instance="0", group="p {job="api-server", instance="0", group="production"} 1 # The histogram is ignored here so the result doesn't change but it has an info annotation now. -eval_info instant at 5m {job="app-server"} == 80 +eval instant at 5m {job="app-server"} == 80 + expect info http_requests_total{group="canary", instance="1", job="app-server"} 80 -eval_info instant at 5m http_requests_histogram != 80 +eval instant at 5m http_requests_histogram != 80 + expect info -eval_info instant at 5m http_requests_histogram > 80 +eval instant at 5m http_requests_histogram > 80 + expect info -eval_info instant at 5m http_requests_histogram < 80 +eval instant at 5m http_requests_histogram < 80 + expect info -eval_info instant at 5m http_requests_histogram >= 80 +eval instant at 5m http_requests_histogram >= 80 + expect info -eval_info instant at 5m http_requests_histogram <= 80 +eval instant at 5m http_requests_histogram <= 80 + expect info # Should produce valid results in case of (in)equality between two histograms. 
eval instant at 5m http_requests_histogram == http_requests_histogram + expect no_info http_requests_histogram{job="app-server", instance="1", group="production"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}} eval instant at 5m http_requests_histogram != http_requests_histogram + expect no_info # group_left/group_right. @@ -470,7 +478,8 @@ load 5m testmetric1{src="a",dst="b"} 0 testmetric2{src="a",dst="b"} 1 -eval_fail instant at 0m -{__name__=~'testmetric1|testmetric2'} +eval instant at 0m -{__name__=~'testmetric1|testmetric2'} + expect fail clear @@ -520,290 +529,386 @@ load 6m right_floats_for_histograms 0 -1 2 3 4 eval range from 0 to 60m step 6m left_floats == right_floats + expect no_info left_floats _ _ _ _ 3 _ _ _ _ Inf -Inf eval range from 0 to 60m step 6m left_floats == bool right_floats + expect no_info {} 0 _ _ _ 1 _ 0 0 0 1 1 eval range from 0 to 60m step 6m left_floats == does_not_match + expect no_info # No results. eval range from 0 to 24m step 6m left_histograms == right_histograms + expect no_info left_histograms {{schema:3 sum:4 count:4 buckets:[1 2 1]}} _ _ _ _ eval range from 0 to 24m step 6m left_histograms == bool right_histograms + expect no_info {} 1 0 _ _ _ -eval_info range from 0 to 24m step 6m left_histograms == right_floats_for_histograms +eval range from 0 to 24m step 6m left_histograms == right_floats_for_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms == bool right_floats_for_histograms +eval range from 0 to 24m step 6m left_histograms == bool right_floats_for_histograms + expect info # No results. 
eval range from 0 to 60m step 6m left_floats != right_floats + expect no_info left_floats 1 _ _ _ _ _ 4 5 NaN _ _ eval range from 0 to 60m step 6m left_floats != bool right_floats + expect no_info {} 1 _ _ _ 0 _ 1 1 1 0 0 eval range from 0 to 24m step 6m left_histograms != right_histograms + expect no_info left_histograms _ {{schema:3 sum:4.5 count:5 buckets:[1 3 1]}} _ _ _ eval range from 0 to 24m step 6m left_histograms != bool right_histograms + expect no_info {} 0 1 _ _ _ -eval_info range from 0 to 24m step 6m left_histograms != right_floats_for_histograms +eval range from 0 to 24m step 6m left_histograms != right_floats_for_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms != bool right_floats_for_histograms +eval range from 0 to 24m step 6m left_histograms != bool right_floats_for_histograms + expect info # No results. eval range from 0 to 60m step 6m left_floats > right_floats + expect no_info left_floats _ _ _ _ _ _ 4 _ _ _ _ eval range from 0 to 60m step 6m left_floats > bool right_floats + expect no_info {} 0 _ _ _ 0 _ 1 0 0 0 0 -eval_info range from 0 to 24m step 6m left_histograms > right_histograms +eval range from 0 to 24m step 6m left_histograms > right_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms > bool right_histograms +eval range from 0 to 24m step 6m left_histograms > bool right_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms > right_floats_for_histograms +eval range from 0 to 24m step 6m left_histograms > right_floats_for_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms > bool right_floats_for_histograms +eval range from 0 to 24m step 6m left_histograms > bool right_floats_for_histograms + expect info # No results. 
eval range from 0 to 60m step 6m left_floats >= right_floats + expect no_info left_floats _ _ _ _ 3 _ 4 _ _ Inf -Inf eval range from 0 to 60m step 6m left_floats >= bool right_floats + expect no_info {} 0 _ _ _ 1 _ 1 0 0 1 1 -eval_info range from 0 to 24m step 6m left_histograms >= right_histograms +eval range from 0 to 24m step 6m left_histograms >= right_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms >= bool right_histograms +eval range from 0 to 24m step 6m left_histograms >= bool right_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms >= right_floats_for_histograms +eval range from 0 to 24m step 6m left_histograms >= right_floats_for_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms >= bool right_floats_for_histograms +eval range from 0 to 24m step 6m left_histograms >= bool right_floats_for_histograms + expect info # No results. eval range from 0 to 60m step 6m left_floats < right_floats + expect no_info left_floats 1 _ _ _ _ _ _ 5 _ _ _ eval range from 0 to 60m step 6m left_floats < bool right_floats + expect no_info {} 1 _ _ _ 0 _ 0 1 0 0 0 -eval_info range from 0 to 24m step 6m left_histograms < right_histograms +eval range from 0 to 24m step 6m left_histograms < right_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms < bool right_histograms +eval range from 0 to 24m step 6m left_histograms < bool right_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms < right_floats_for_histograms +eval range from 0 to 24m step 6m left_histograms < right_floats_for_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms < bool right_floats_for_histograms +eval range from 0 to 24m step 6m left_histograms < bool right_floats_for_histograms + expect info # No results. 
eval range from 0 to 60m step 6m left_floats <= right_floats + expect no_info left_floats 1 _ _ _ 3 _ _ 5 _ Inf -Inf eval range from 0 to 60m step 6m left_floats <= bool right_floats + expect no_info {} 1 _ _ _ 1 _ 0 1 0 1 1 -eval_info range from 0 to 24m step 6m left_histograms <= right_histograms +eval range from 0 to 24m step 6m left_histograms <= right_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms <= bool right_histograms +eval range from 0 to 24m step 6m left_histograms <= bool right_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms <= right_floats_for_histograms +eval range from 0 to 24m step 6m left_histograms <= right_floats_for_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms <= bool right_floats_for_histograms +eval range from 0 to 24m step 6m left_histograms <= bool right_floats_for_histograms + expect info # No results. # Vector / scalar combinations with scalar on right side eval range from 0 to 60m step 6m left_floats == 3 + expect no_info left_floats _ _ _ _ 3 _ _ _ _ _ _ eval range from 0 to 60m step 6m left_floats != 3 + expect no_info left_floats 1 2 _ _ _ _ 4 5 NaN Inf -Inf eval range from 0 to 60m step 6m left_floats > 3 + expect no_info left_floats _ _ _ _ _ _ 4 5 _ Inf _ eval range from 0 to 60m step 6m left_floats >= 3 + expect no_info left_floats _ _ _ _ 3 _ 4 5 _ Inf _ eval range from 0 to 60m step 6m left_floats < 3 + expect no_info left_floats 1 2 _ _ _ _ _ _ _ _ -Inf eval range from 0 to 60m step 6m left_floats <= 3 + expect no_info left_floats 1 2 _ _ 3 _ _ _ _ _ -Inf eval range from 0 to 60m step 6m left_floats == bool 3 + expect no_info {} 0 0 _ _ 1 _ 0 0 0 0 0 eval range from 0 to 60m step 6m left_floats == Inf + expect no_info left_floats _ _ _ _ _ _ _ _ _ Inf _ eval range from 0 to 60m step 6m left_floats == bool Inf + expect no_info {} 0 0 _ _ 0 _ 0 0 0 1 0 eval range from 0 to 60m step 6m 
left_floats == NaN + expect no_info # No results. eval range from 0 to 60m step 6m left_floats == bool NaN + expect no_info {} 0 0 _ _ 0 _ 0 0 0 0 0 -eval_info range from 0 to 24m step 6m left_histograms == 3 +eval range from 0 to 24m step 6m left_histograms == 3 + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms == 0 +eval range from 0 to 24m step 6m left_histograms == 0 + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms != 3 +eval range from 0 to 24m step 6m left_histograms != 3 + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms != 0 +eval range from 0 to 24m step 6m left_histograms != 0 + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms > 3 +eval range from 0 to 24m step 6m left_histograms > 3 + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms > 0 +eval range from 0 to 24m step 6m left_histograms > 0 + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms >= 3 +eval range from 0 to 24m step 6m left_histograms >= 3 + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms >= 0 +eval range from 0 to 24m step 6m left_histograms >= 0 + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms < 3 +eval range from 0 to 24m step 6m left_histograms < 3 + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms < 0 +eval range from 0 to 24m step 6m left_histograms < 0 + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms <= 3 +eval range from 0 to 24m step 6m left_histograms <= 3 + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms <= 0 +eval range from 0 to 24m step 6m left_histograms <= 0 + expect info # No results. 
-eval_info range from 0 to 24m step 6m left_histograms == bool 3 +eval range from 0 to 24m step 6m left_histograms == bool 3 + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms == bool 0 +eval range from 0 to 24m step 6m left_histograms == bool 0 + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms != bool 3 +eval range from 0 to 24m step 6m left_histograms != bool 3 + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms != bool 0 +eval range from 0 to 24m step 6m left_histograms != bool 0 + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms > bool 3 +eval range from 0 to 24m step 6m left_histograms > bool 3 + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms > bool 0 +eval range from 0 to 24m step 6m left_histograms > bool 0 + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms >= bool 3 +eval range from 0 to 24m step 6m left_histograms >= bool 3 + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms >= bool 0 +eval range from 0 to 24m step 6m left_histograms >= bool 0 + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms < bool 3 +eval range from 0 to 24m step 6m left_histograms < bool 3 + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms < bool 0 +eval range from 0 to 24m step 6m left_histograms < bool 0 + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms <= bool 3 +eval range from 0 to 24m step 6m left_histograms <= bool 3 + expect info # No results. -eval_info range from 0 to 24m step 6m left_histograms <= bool 0 +eval range from 0 to 24m step 6m left_histograms <= bool 0 + expect info # No results. 
# Vector / scalar combinations with scalar on left side eval range from 0 to 60m step 6m 3 == left_floats + expect no_info left_floats _ _ _ _ 3 _ _ _ _ _ _ eval range from 0 to 60m step 6m 3 != left_floats + expect no_info left_floats 1 2 _ _ _ _ 4 5 NaN Inf -Inf eval range from 0 to 60m step 6m 3 < left_floats + expect no_info left_floats _ _ _ _ _ _ 4 5 _ Inf _ eval range from 0 to 60m step 6m 3 <= left_floats + expect no_info left_floats _ _ _ _ 3 _ 4 5 _ Inf _ eval range from 0 to 60m step 6m 3 > left_floats + expect no_info left_floats 1 2 _ _ _ _ _ _ _ _ -Inf eval range from 0 to 60m step 6m 3 >= left_floats + expect no_info left_floats 1 2 _ _ 3 _ _ _ _ _ -Inf eval range from 0 to 60m step 6m 3 == bool left_floats + expect no_info {} 0 0 _ _ 1 _ 0 0 0 0 0 eval range from 0 to 60m step 6m Inf == left_floats + expect no_info left_floats _ _ _ _ _ _ _ _ _ Inf _ eval range from 0 to 60m step 6m Inf == bool left_floats + expect no_info {} 0 0 _ _ 0 _ 0 0 0 1 0 eval range from 0 to 60m step 6m NaN == left_floats + expect no_info + expect no_warn # No results. eval range from 0 to 60m step 6m NaN == bool left_floats + expect no_info {} 0 0 _ _ 0 _ 0 0 0 0 0 -eval_info range from 0 to 24m step 6m 3 == left_histograms +eval range from 0 to 24m step 6m 3 == left_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m 0 == left_histograms +eval range from 0 to 24m step 6m 0 == left_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m 3 != left_histograms +eval range from 0 to 24m step 6m 3 != left_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m 0 != left_histograms +eval range from 0 to 24m step 6m 0 != left_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m 3 < left_histograms +eval range from 0 to 24m step 6m 3 < left_histograms + expect info # No results. 
-eval_info range from 0 to 24m step 6m 0 < left_histograms +eval range from 0 to 24m step 6m 0 < left_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m 3 < left_histograms +eval range from 0 to 24m step 6m 3 < left_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m 0 < left_histograms +eval range from 0 to 24m step 6m 0 < left_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m 3 > left_histograms +eval range from 0 to 24m step 6m 3 > left_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m 0 > left_histograms +eval range from 0 to 24m step 6m 0 > left_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m 3 >= left_histograms +eval range from 0 to 24m step 6m 3 >= left_histograms + expect info # No results. -eval_info range from 0 to 24m step 6m 0 >= left_histograms +eval range from 0 to 24m step 6m 0 >= left_histograms + expect info # No results. clear diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/subquery.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/subquery.test index 8c7c178b852..f803dba349d 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/subquery.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/subquery.test @@ -156,4 +156,6 @@ load 5m foo 3+0x10 eval instant at 12m min_over_time((topk(1, foo))[1m:5m]) + expect no_info + expect no_warn #empty diff --git a/vendor/github.com/prometheus/prometheus/promql/value.go b/vendor/github.com/prometheus/prometheus/promql/value.go index dc59b9e9cc8..2e387117e51 100644 --- a/vendor/github.com/prometheus/prometheus/promql/value.go +++ b/vendor/github.com/prometheus/prometheus/promql/value.go @@ -475,8 +475,12 @@ func (ssi *storageSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *his panic(errors.New("storageSeriesIterator: AtHistogram not supported")) } -func (ssi 
*storageSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { - return ssi.currT, ssi.currH +func (ssi *storageSeriesIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { + if fh == nil { + return ssi.currT, ssi.currH.Copy() + } + ssi.currH.CopyTo(fh) + return ssi.currT, fh } func (ssi *storageSeriesIterator) AtT() int64 { diff --git a/vendor/github.com/prometheus/prometheus/rules/manager.go b/vendor/github.com/prometheus/prometheus/rules/manager.go index 575a32ba280..6ecdf793d1e 100644 --- a/vendor/github.com/prometheus/prometheus/rules/manager.go +++ b/vendor/github.com/prometheus/prometheus/rules/manager.go @@ -309,7 +309,7 @@ type GroupLoader interface { type FileLoader struct{} func (FileLoader) Load(identifier string, ignoreUnknownFields bool) (*rulefmt.RuleGroups, []error) { - return rulefmt.ParseFile(identifier, ignoreUnknownFields) + return rulefmt.ParseFile(identifier, rulefmt.WithIgnoreUnknownFields(ignoreUnknownFields)) } func (FileLoader) Parse(query string) (parser.Expr, error) { return parser.ParseExpr(query) } @@ -627,7 +627,7 @@ func ParseFiles(patterns []string) error { } } for fn, pat := range files { - _, errs := rulefmt.ParseFile(fn, false) + _, errs := rulefmt.ParseFile(fn) if len(errs) > 0 { return fmt.Errorf("parse rules from file %q (pattern: %q): %w", fn, pat, errors.Join(errs...)) } diff --git a/vendor/github.com/prometheus/prometheus/scrape/manager.go b/vendor/github.com/prometheus/prometheus/scrape/manager.go index 7ffce2f38d7..c2da4558588 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/manager.go +++ b/vendor/github.com/prometheus/prometheus/scrape/manager.go @@ -127,7 +127,10 @@ func (m *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) error { go m.reloader() for { select { - case ts := <-tsets: + case ts, ok := <-tsets: + if !ok { + break + } m.updateTsets(ts) select { diff --git 
a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index d92b7fc16b5..b0f7047b9e0 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -23,6 +23,7 @@ import ( "log/slog" "math" "net/http" + "net/http/httptrace" "reflect" "slices" "strconv" @@ -36,6 +37,10 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" + "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/trace" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -144,15 +149,18 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed logger = promslog.NewNopLogger() } - client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, options.HTTPClientOptions...) + client, err := newScrapeClient(cfg.HTTPClientConfig, cfg.JobName, options.HTTPClientOptions...) 
if err != nil { - return nil, fmt.Errorf("error creating HTTP client: %w", err) + return nil, err } - if cfg.MetricNameValidationScheme == model.UnsetValidation { + switch cfg.MetricNameValidationScheme { + case model.LegacyValidation, model.UTF8Validation: + default: return nil, errors.New("cfg.MetricNameValidationScheme must be set in scrape configuration") } - escapingScheme, err := config.ToEscapingScheme(cfg.MetricNameEscapingScheme, cfg.MetricNameValidationScheme) + var escapingScheme model.EscapingScheme + escapingScheme, err = config.ToEscapingScheme(cfg.MetricNameEscapingScheme, cfg.MetricNameValidationScheme) if err != nil { return nil, fmt.Errorf("invalid metric name escaping scheme, %w", err) } @@ -313,17 +321,19 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { sp.metrics.targetScrapePoolReloads.Inc() start := time.Now() - client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, sp.httpOpts...) + client, err := newScrapeClient(cfg.HTTPClientConfig, cfg.JobName, sp.httpOpts...) 
if err != nil { sp.metrics.targetScrapePoolReloadsFailed.Inc() - return fmt.Errorf("error creating HTTP client: %w", err) + return err } reuseCache := reusableCache(sp.config, cfg) sp.config = cfg oldClient := sp.client sp.client = client - if cfg.MetricNameValidationScheme == model.UnsetValidation { + switch cfg.MetricNameValidationScheme { + case model.LegacyValidation, model.UTF8Validation: + default: return errors.New("cfg.MetricNameValidationScheme must be set in scrape configuration") } sp.validationScheme = cfg.MetricNameValidationScheme @@ -829,6 +839,8 @@ func (s *targetScraper) scrape(ctx context.Context) (*http.Response, error) { s.req = req } + ctx, span := otel.Tracer("").Start(ctx, "Scrape", trace.WithSpanKind(trace.SpanKindClient)) + defer span.End() return s.client.Do(s.req.WithContext(ctx)) } @@ -2273,3 +2285,16 @@ func pickSchema(bucketFactor float64) int32 { return int32(floor) } } + +func newScrapeClient(cfg config_util.HTTPClientConfig, name string, optFuncs ...config_util.HTTPClientOption) (*http.Client, error) { + client, err := config_util.NewClientFromConfig(cfg, name, optFuncs...) 
+ if err != nil { + return nil, fmt.Errorf("error creating HTTP client: %w", err) + } + client.Transport = otelhttp.NewTransport( + client.Transport, + otelhttp.WithClientTrace(func(ctx context.Context) *httptrace.ClientTrace { + return otelhttptrace.NewClientTrace(ctx, otelhttptrace.WithoutSubSpans()) + })) + return client, nil +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go index b763b3e2b4b..8e43b62aae2 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -133,6 +133,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope s // Include name, version and schema URL. scopeLabelCount = scope.attributes.Len() + 3 } + // Calculate the maximum possible number of labels we could return so we can preallocate l. maxLabelCount := attributes.Len() + len(settings.ExternalLabels) + len(promotedAttrs) + scopeLabelCount + len(extras)/2 @@ -175,15 +176,15 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope s } } if promoteScope { - l["otel_scope_name"] = scope.name - l["otel_scope_version"] = scope.version - l["otel_scope_schema_url"] = scope.schemaURL scope.attributes.Range(func(k string, v pcommon.Value) bool { - name := "otel_scope_" + k - name = labelNamer.Build(name) + name := labelNamer.Build("otel_scope_" + k) l[name] = v.AsString() return true }) + // Scope Name, Version and Schema URL are added after attributes to ensure they are not overwritten by attributes. + l["otel_scope_name"] = scope.name + l["otel_scope_version"] = scope.version + l["otel_scope_schema_url"] = scope.schemaURL } // Map service.name + service.namespace to job. 
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index 91563bf2c29..9955fd5fc66 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -53,10 +53,10 @@ type Settings struct { KeepIdentifyingResourceAttributes bool ConvertHistogramsToNHCB bool AllowDeltaTemporality bool - // PromoteScopeMetadata controls whether to promote OTel scope metadata to metric labels. - PromoteScopeMetadata bool // LookbackDelta is the PromQL engine lookback delta. LookbackDelta time.Duration + // PromoteScopeMetadata controls whether to promote OTel scope metadata to metric labels. + PromoteScopeMetadata bool // Mimir specifics. EnableCreatedTimestampZeroIngestion bool diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go index ef180ae4a2e..cacad16a3c8 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go @@ -600,15 +600,16 @@ func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) er otlpCfg := rw.config().OTLPConfig converter := otlptranslator.NewPrometheusConverter() + annots, err := converter.FromMetrics(ctx, md, otlptranslator.Settings{ - AddMetricSuffixes: otlpCfg.TranslationStrategy != config.NoTranslation, - AllowUTF8: otlpCfg.TranslationStrategy != config.UnderscoreEscapingWithSuffixes, + AddMetricSuffixes: otlpCfg.TranslationStrategy.ShouldAddSuffixes(), + AllowUTF8: !otlpCfg.TranslationStrategy.ShouldEscape(), PromoteResourceAttributes: 
otlptranslator.NewPromoteResourceAttributes(otlpCfg), KeepIdentifyingResourceAttributes: otlpCfg.KeepIdentifyingResourceAttributes, ConvertHistogramsToNHCB: otlpCfg.ConvertHistogramsToNHCB, AllowDeltaTemporality: rw.allowDeltaTemporality, - PromoteScopeMetadata: otlpCfg.PromoteScopeMetadata, LookbackDelta: rw.lookbackDelta, + PromoteScopeMetadata: otlpCfg.PromoteScopeMetadata, // Mimir specifics. EnableCreatedTimestampZeroIngestion: rw.enableCTZeroIngestion, diff --git a/vendor/github.com/prometheus/prometheus/template/template.go b/vendor/github.com/prometheus/prometheus/template/template.go index 75a9f33bd2e..87ca32b3469 100644 --- a/vendor/github.com/prometheus/prometheus/template/template.go +++ b/vendor/github.com/prometheus/prometheus/template/template.go @@ -263,6 +263,17 @@ func NewTemplateExpander( return floatToTime(v) }, + "toDuration": func(i interface{}) (*time.Duration, error) { + v, err := common_templates.ConvertToFloat(i) + if err != nil { + return nil, err + } + d := time.Duration(v * float64(time.Second)) + return &d, nil + }, + "now": func() float64 { + return float64(timestamp) / 1000.0 + }, "pathPrefix": func() string { return externalURL.Path }, @@ -270,7 +281,7 @@ func NewTemplateExpander( return externalURL.String() }, "parseDuration": func(d string) (float64, error) { - v, err := model.ParseDuration(d) + v, err := model.ParseDurationAllowNegative(d) if err != nil { return 0, err } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go index e5ad4028bbb..7f3b2a5968f 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go @@ -343,7 +343,7 @@ func (a *FloatHistogramAppender) appendable(h *histogram.FloatHistogram) ( // original deltas to a new set of deltas to match a new span layout that adds // buckets, we simply need to generate 
a list of inserts. // -// Note: Within expandSpansForward we don't have to worry about the changes to the +// Note: Within expandFloatSpansAndBuckets we don't have to worry about the changes to the // spans themselves, thanks to the iterators we get to work with the more useful // bucket indices (which of course directly correspond to the buckets we have to // adjust). @@ -378,6 +378,48 @@ func expandFloatSpansAndBuckets(a, b []histogram.Span, aBuckets []xorValue, bBuc bCount = bBuckets[bCountIdx] } + // addInsert updates the current Insert with a new insert at the given + // bucket index (otherIdx). + addInsert := func(inserts []Insert, insert *Insert, otherIdx int) []Insert { + if insert.num == 0 { + // First insert. + insert.bucketIdx = otherIdx + } else if insert.bucketIdx+insert.num != otherIdx { + // Insert is not continuous from previous insert. + inserts = append(inserts, *insert) + insert.num = 0 + insert.bucketIdx = otherIdx + } + insert.num++ + return inserts + } + + advanceA := func() { + if aInter.num > 0 { + aInserts = append(aInserts, aInter) + aInter.num = 0 + } + aIdx, aOK = ai.Next() + aInter.pos++ + aCountIdx++ + if aOK { + aCount = aBuckets[aCountIdx].value + } + } + + advanceB := func() { + if bInter.num > 0 { + bInserts = append(bInserts, bInter) + bInter.num = 0 + } + bIdx, bOK = bi.Next() + bInter.pos++ + bCountIdx++ + if bOK { + bCount = bBuckets[bCountIdx] + } + } + loop: for { switch { @@ -389,105 +431,37 @@ loop: return nil, nil, false } - // Finish WIP insert for a and reset. - if aInter.num > 0 { - aInserts = append(aInserts, aInter) - aInter.num = 0 - } - - // Finish WIP insert for b and reset. - if bInter.num > 0 { - bInserts = append(bInserts, bInter) - bInter.num = 0 - } - - aIdx, aOK = ai.Next() - bIdx, bOK = bi.Next() - aInter.pos++ // Advance potential insert position. - aCountIdx++ // Advance absolute bucket count index for a. 
- if aOK { - aCount = aBuckets[aCountIdx].value - } - bInter.pos++ // Advance potential insert position. - bCountIdx++ // Advance absolute bucket count index for b. - if bOK { - bCount = bBuckets[bCountIdx] - } + advanceA() + advanceB() continue case aIdx < bIdx: // b misses a bucket index that is in a. // This is ok if the count in a is 0, in which case we make a note to // fill in the bucket in b and advance a. if aCount == 0 { - bInter.num++ // Mark that we need to insert a bucket in b. - bInter.bucketIdx = aIdx - // Advance a - if aInter.num > 0 { - aInserts = append(aInserts, aInter) - aInter.num = 0 - } - aIdx, aOK = ai.Next() - aInter.pos++ - aCountIdx++ - if aOK { - aCount = aBuckets[aCountIdx].value - } + bInserts = addInsert(bInserts, &bInter, aIdx) + advanceA() continue } // Otherwise we are missing a bucket that was in use in a, which is a reset. return nil, nil, false case aIdx > bIdx: // a misses a value that is in b. Forward b and recompare. - aInter.num++ - bInter.bucketIdx = bIdx - // Advance b - if bInter.num > 0 { - bInserts = append(bInserts, bInter) - bInter.num = 0 - } - bIdx, bOK = bi.Next() - bInter.pos++ - bCountIdx++ - if bOK { - bCount = bBuckets[bCountIdx] - } + aInserts = addInsert(aInserts, &aInter, bIdx) + advanceB() } case aOK && !bOK: // b misses a value that is in a. // This is ok if the count in a is 0, in which case we make a note to // fill in the bucket in b and advance a. if aCount == 0 { - bInter.num++ - bInter.bucketIdx = aIdx - // Advance a - if aInter.num > 0 { - aInserts = append(aInserts, aInter) - aInter.num = 0 - } - aIdx, aOK = ai.Next() - aInter.pos++ // Advance potential insert position. - // Update absolute bucket counts for a. - aCountIdx++ - if aOK { - aCount = aBuckets[aCountIdx].value - } + bInserts = addInsert(bInserts, &bInter, aIdx) + advanceA() continue } // Otherwise we are missing a bucket that was in use in a, which is a reset. return nil, nil, false case !aOK && bOK: // a misses a value that is in b. 
Forward b and recompare. - aInter.num++ - bInter.bucketIdx = bIdx - // Advance b - if bInter.num > 0 { - bInserts = append(bInserts, bInter) - bInter.num = 0 - } - bIdx, bOK = bi.Next() - bInter.pos++ // Advance potential insert position. - // Update absolute bucket counts for b. - bCountIdx++ - if bOK { - bCount = bBuckets[bCountIdx] - } + aInserts = addInsert(aInserts, &aInter, bIdx) + advanceB() default: // Both iterators ran out. We're done. if aInter.num > 0 { aInserts = append(aInserts, aInter) @@ -782,7 +756,7 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend // The histogram needs to be expanded to have the extra empty buckets // of the chunk. if len(pForwardInserts) == 0 && len(nForwardInserts) == 0 { - // No new chunks from the histogram, so the spans of the appender can accommodate the new buckets. + // No new buckets from the histogram, so the spans of the appender can accommodate the new buckets. // However we need to make a copy in case the input is sharing spans from an iterator. h.PositiveSpans = make([]histogram.Span, len(a.pSpans)) copy(h.PositiveSpans, a.pSpans) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go index 0f54eb69288..4ba0c467d82 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go @@ -374,7 +374,7 @@ func (a *HistogramAppender) appendable(h *histogram.Histogram) ( // original deltas to a new set of deltas to match a new span layout that adds // buckets, we simply need to generate a list of inserts. 
// -// Note: Within expandSpansForward we don't have to worry about the changes to the +// Note: Within expandIntSpansAndBuckets we don't have to worry about the changes to the // spans themselves, thanks to the iterators we get to work with the more useful // bucket indices (which of course directly correspond to the buckets we have to // adjust). @@ -409,6 +409,48 @@ func expandIntSpansAndBuckets(a, b []histogram.Span, aBuckets, bBuckets []int64) bCount = bBuckets[bCountIdx] } + // addInsert updates the current Insert with a new insert at the given + // bucket index (otherIdx). + addInsert := func(inserts []Insert, insert *Insert, otherIdx int) []Insert { + if insert.num == 0 { + // First insert. + insert.bucketIdx = otherIdx + } else if insert.bucketIdx+insert.num != otherIdx { + // Insert is not continuous from previous insert. + inserts = append(inserts, *insert) + insert.num = 0 + insert.bucketIdx = otherIdx + } + insert.num++ + return inserts + } + + advanceA := func() { + if aInter.num > 0 { + aInserts = append(aInserts, aInter) + aInter.num = 0 + } + aIdx, aOK = ai.Next() + aInter.pos++ + aCountIdx++ + if aOK { + aCount += aBuckets[aCountIdx] + } + } + + advanceB := func() { + if bInter.num > 0 { + bInserts = append(bInserts, bInter) + bInter.num = 0 + } + bIdx, bOK = bi.Next() + bInter.pos++ + bCountIdx++ + if bOK { + bCount += bBuckets[bCountIdx] + } + } + loop: for { switch { @@ -420,105 +462,37 @@ loop: return nil, nil, false } - // Finish WIP insert for a and reset. - if aInter.num > 0 { - aInserts = append(aInserts, aInter) - aInter.num = 0 - } - - // Finish WIP insert for b and reset. - if bInter.num > 0 { - bInserts = append(bInserts, bInter) - bInter.num = 0 - } - - aIdx, aOK = ai.Next() - bIdx, bOK = bi.Next() - aInter.pos++ // Advance potential insert position. - aCountIdx++ // Advance absolute bucket count index for a. - if aOK { - aCount += aBuckets[aCountIdx] - } - bInter.pos++ // Advance potential insert position. 
- bCountIdx++ // Advance absolute bucket count index for b. - if bOK { - bCount += bBuckets[bCountIdx] - } + advanceA() + advanceB() continue case aIdx < bIdx: // b misses a bucket index that is in a. // This is ok if the count in a is 0, in which case we make a note to // fill in the bucket in b and advance a. if aCount == 0 { - bInter.num++ // Mark that we need to insert a bucket in b. - bInter.bucketIdx = aIdx - // Advance a - if aInter.num > 0 { - aInserts = append(aInserts, aInter) - aInter.num = 0 - } - aIdx, aOK = ai.Next() - aInter.pos++ - aCountIdx++ - if aOK { - aCount += aBuckets[aCountIdx] - } + bInserts = addInsert(bInserts, &bInter, aIdx) + advanceA() continue } // Otherwise we are missing a bucket that was in use in a, which is a reset. return nil, nil, false case aIdx > bIdx: // a misses a value that is in b. Forward b and recompare. - aInter.num++ - aInter.bucketIdx = bIdx - // Advance b - if bInter.num > 0 { - bInserts = append(bInserts, bInter) - bInter.num = 0 - } - bIdx, bOK = bi.Next() - bInter.pos++ - bCountIdx++ - if bOK { - bCount += bBuckets[bCountIdx] - } + aInserts = addInsert(aInserts, &aInter, bIdx) + advanceB() } case aOK && !bOK: // b misses a value that is in a. // This is ok if the count in a is 0, in which case we make a note to // fill in the bucket in b and advance a. if aCount == 0 { - bInter.num++ - bInter.bucketIdx = aIdx - // Advance a - if aInter.num > 0 { - aInserts = append(aInserts, aInter) - aInter.num = 0 - } - aIdx, aOK = ai.Next() - aInter.pos++ // Advance potential insert position. - // Update absolute bucket counts for a. - aCountIdx++ - if aOK { - aCount += aBuckets[aCountIdx] - } + bInserts = addInsert(bInserts, &bInter, aIdx) + advanceA() continue } // Otherwise we are missing a bucket that was in use in a, which is a reset. return nil, nil, false case !aOK && bOK: // a misses a value that is in b. Forward b and recompare. 
- aInter.num++ - aInter.bucketIdx = bIdx - // Advance b - if bInter.num > 0 { - bInserts = append(bInserts, bInter) - bInter.num = 0 - } - bIdx, bOK = bi.Next() - bInter.pos++ // Advance potential insert position. - // Update absolute bucket counts for b. - bCountIdx++ - if bOK { - bCount += bBuckets[bCountIdx] - } + aInserts = addInsert(aInserts, &aInter, bIdx) + advanceB() default: // Both iterators ran out. We're done. if aInter.num > 0 { aInserts = append(aInserts, aInter) @@ -823,7 +797,7 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h // The histogram needs to be expanded to have the extra empty buckets // of the chunk. if len(pForwardInserts) == 0 && len(nForwardInserts) == 0 { - // No new chunks from the histogram, so the spans of the appender can accommodate the new buckets. + // No new buckets from the histogram, so the spans of the appender can accommodate the new buckets. // However we need to make a copy in case the input is sharing spans from an iterator. h.PositiveSpans = make([]histogram.Span, len(a.pSpans)) copy(h.PositiveSpans, a.pSpans) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go index 7bb31acf00c..5ee783fd683 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go @@ -284,101 +284,12 @@ type Insert struct { bucketIdx int } -// Deprecated: expandSpansForward, use expandIntSpansAndBuckets or -// expandFloatSpansAndBuckets instead. -// expandSpansForward is left here for reference. -// expandSpansForward returns the inserts to expand the bucket spans 'a' so that -// they match the spans in 'b'. 'b' must cover the same or more buckets than -// 'a', otherwise the function will return false. 
-// -// Example: -// -// Let's say the old buckets look like this: -// -// span syntax: [offset, length] -// spans : [ 0 , 2 ] [2,1] [ 3 , 2 ] [3,1] [1,1] -// bucket idx : [0] [1] 2 3 [4] 5 6 7 [8] [9] 10 11 12 [13] 14 [15] -// raw values 6 3 3 2 4 5 1 -// deltas 6 -3 0 -1 2 1 -4 -// -// But now we introduce a new bucket layout. (Carefully chosen example where we -// have a span appended, one unchanged[*], one prepended, and two merge - in -// that order.) -// -// [*] unchanged in terms of which bucket indices they represent. but to achieve -// that, their offset needs to change if "disrupted" by spans changing ahead of -// them -// -// \/ this one is "unchanged" -// spans : [ 0 , 3 ] [1,1] [ 1 , 4 ] [ 3 , 3 ] -// bucket idx : [0] [1] [2] 3 [4] 5 [6] [7] [8] [9] 10 11 12 [13] [14] [15] -// raw values 6 3 0 3 0 0 2 4 5 0 1 -// deltas 6 -3 -3 3 -3 0 2 2 1 -5 1 -// delta mods: / \ / \ / \ -// -// Note for histograms with delta-encoded buckets: Whenever any new buckets are -// introduced, the subsequent "old" bucket needs to readjust its delta to the -// new base of 0. Thus, for the caller who wants to transform the set of -// original deltas to a new set of deltas to match a new span layout that adds -// buckets, we simply need to generate a list of inserts. -// -// Note: Within expandSpansForward we don't have to worry about the changes to the -// spans themselves, thanks to the iterators we get to work with the more useful -// bucket indices (which of course directly correspond to the buckets we have to -// adjust). -func expandSpansForward(a, b []histogram.Span) (forward []Insert, ok bool) { - ai := newBucketIterator(a) - bi := newBucketIterator(b) - - var inserts []Insert - - // When inter.num becomes > 0, this becomes a valid insert that should - // be yielded when we finish a streak of new buckets. 
- var inter Insert - - av, aOK := ai.Next() - bv, bOK := bi.Next() -loop: - for { - switch { - case aOK && bOK: - switch { - case av == bv: // Both have an identical value. move on! - // Finish WIP insert and reset. - if inter.num > 0 { - inserts = append(inserts, inter) - } - inter.num = 0 - av, aOK = ai.Next() - bv, bOK = bi.Next() - inter.pos++ - case av < bv: // b misses a value that is in a. - return inserts, false - case av > bv: // a misses a value that is in b. Forward b and recompare. - inter.num++ - bv, bOK = bi.Next() - } - case aOK && !bOK: // b misses a value that is in a. - return inserts, false - case !aOK && bOK: // a misses a value that is in b. Forward b and recompare. - inter.num++ - bv, bOK = bi.Next() - default: // Both iterators ran out. We're done. - if inter.num > 0 { - inserts = append(inserts, inter) - } - break loop - } - } - - return inserts, true -} - -// expandSpansBothWays is similar to expandSpansForward, but now b may also -// cover an entirely different set of buckets. The function returns the -// “forward” inserts to expand 'a' to also cover all the buckets exclusively -// covered by 'b', and it returns the “backward” inserts to expand 'b' to also -// cover all the buckets exclusively covered by 'a'. +// expandSpansBothWays is similar to expandFloatSpansAndBuckets and +// expandIntSpansAndBuckets, but now b may also cover an entirely different set +// of buckets and counter resets are ignored. The function returns the “forward” +// inserts to expand 'a' to also cover all the buckets exclusively covered by +// 'b', and it returns the “backward” inserts to expand 'b' to also cover all +// the buckets exclusively covered by 'a'. func expandSpansBothWays(a, b []histogram.Span) (forward, backward []Insert, mergedSpans []histogram.Span) { ai := newBucketIterator(a) bi := newBucketIterator(b) @@ -488,14 +399,24 @@ func insert[BV bucketValue](in, out []BV, inserts []Insert, deltas bool) []BV { ii int // The next insert to process. 
) for i, d := range in { - if ii < len(inserts) && i == inserts[ii].pos { + if ii >= len(inserts) || i != inserts[ii].pos { + // No inserts at this position, the original delta is still valid. + out[oi] = d + oi++ + v += d + continue + } + // Process inserts. + firstInsert := true + for ii < len(inserts) && i == inserts[ii].pos { // We have an insert! // Add insert.num new delta values such that their // bucket values equate 0. When deltas==false, it means // that it is an absolute value. So we set it to 0 // directly. - if deltas { + if deltas && firstInsert { out[oi] = -v + firstInsert = false // No need to go to 0 in further inserts. } else { out[oi] = 0 } @@ -505,32 +426,30 @@ func insert[BV bucketValue](in, out []BV, inserts []Insert, deltas bool) []BV { oi++ } ii++ - - // Now save the value from the input. The delta value we - // should save is the original delta value + the last - // value of the point before the insert (to undo the - // delta that was introduced by the insert). When - // deltas==false, it means that it is an absolute value, - // so we set it directly to the value in the 'in' slice. - if deltas { - out[oi] = d + v - } else { - out[oi] = d - } - oi++ - v = d + v - continue } - // If there was no insert, the original delta is still valid. - out[oi] = d + // Now save the value from the input. The delta value we + // should save is the original delta value + the last + // value of the point before the insert (to undo the + // delta that was introduced by the insert). When + // deltas==false, it means that it is an absolute value, + // so we set it directly to the value in the 'in' slice. + if deltas { + out[oi] = d + v + } else { + out[oi] = d + } oi++ v += d } - switch ii { - case len(inserts): - // All inserts processed. Nothing more to do. - case len(inserts) - 1: - // One more insert to process at the end. + // Insert empty buckets at the end. 
+ for ii < len(inserts) { + if inserts[ii].pos < len(in) { + panic("leftover inserts must be after the current buckets") + } + // Add insert.num new delta values such that their + // bucket values equate 0. When deltas==false, it means + // that it is an absolute value. So we set it to 0 + // directly. if deltas { out[oi] = -v } else { @@ -541,8 +460,8 @@ func insert[BV bucketValue](in, out []BV, inserts []Insert, deltas bool) []BV { out[oi] = 0 oi++ } - default: - panic("unprocessed inserts left") + ii++ + v = 0 } return out } @@ -628,7 +547,7 @@ func adjustForInserts(spans []histogram.Span, inserts []Insert) (mergedSpans []h } } for i < len(inserts) { - addBucket(inserts[i].bucketIdx) + addBucket(insertIdx) consumeInsert() } return diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index 66738751331..9101a474b75 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -191,14 +191,6 @@ func DefaultPostingsDecoderFactory(_ *BlockMeta) index.PostingsDecoder { return index.DecodePostingsRaw } -func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, maxBlockChunkSegmentSize int64, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { - return NewLeveledCompactorWithOptions(ctx, r, l, ranges, pool, LeveledCompactorOptions{ - MaxBlockChunkSegmentSize: maxBlockChunkSegmentSize, - MergeFunc: mergeFunc, - EnableOverlappingCompaction: true, - }) -} - func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { return NewLeveledCompactorWithOptions(ctx, r, l, ranges, pool, LeveledCompactorOptions{ MergeFunc: mergeFunc, diff --git 
a/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go index 5da360b69ab..f8070ff3431 100644 --- a/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go +++ b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go @@ -147,7 +147,7 @@ var ( IncompatibleBucketLayoutInBinOpWarning = fmt.Errorf("%w: incompatible bucket layout encountered for binary operator", PromQLWarning) PossibleNonCounterInfo = fmt.Errorf("%w: metric might not be a counter, name does not end in _total/_sum/_count/_bucket:", PromQLInfo) - PossibleNonCounterLabelInfo = fmt.Errorf("%w: metric might not be a counter, __type__ label is not set to %q", PromQLInfo, model.MetricTypeCounter) + PossibleNonCounterLabelInfo = fmt.Errorf("%w: metric might not be a counter, __type__ label is not set to %q or %q", PromQLInfo, model.MetricTypeCounter, model.MetricTypeHistogram) HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name", PromQLInfo) IncompatibleTypesInBinOpInfo = fmt.Errorf("%w: incompatible sample types encountered for binary operator", PromQLInfo) HistogramIgnoredInAggregationInfo = fmt.Errorf("%w: ignored histogram in", PromQLInfo) diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go index 180be686122..4f3926a2ea8 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go @@ -544,8 +544,7 @@ func extractQueryOpts(r *http.Request) (promql.QueryOpts, error) { duration = parsedDuration } - //nolint:staticcheck - return promql.NewPrometheusQueryOpts(r.FormValue("stats") == "all", duration, model.NameValidationScheme), nil + return 
promql.NewPrometheusQueryOpts(r.FormValue("stats") == "all", duration), nil } func (api *API) queryRange(r *http.Request) (result apiFuncResult) { diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index c9455b50b9b..425ec574d65 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.238.0" +const Version = "0.239.0" diff --git a/vendor/modules.txt b/vendor/modules.txt index 53dd6d84475..24e7bee1f40 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1099,7 +1099,7 @@ github.com/pmezard/go-difflib/difflib # github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c ## explicit; go 1.14 github.com/power-devops/perfstat -# github.com/prometheus/alertmanager v0.28.1 => github.com/juliusmh/alertmanager v0.26.1-0.20250624114102-96969065f8d2 +# github.com/prometheus/alertmanager v0.28.1 => github.com/grafana/prometheus-alertmanager v0.25.1-0.20250722103749-329f0c4df1ba ## explicit; go 1.23.0 github.com/prometheus/alertmanager/api github.com/prometheus/alertmanager/api/metrics @@ -1169,7 +1169,7 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint/validations # github.com/prometheus/client_model v0.6.2 ## explicit; go 1.22.0 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3 +# github.com/prometheus/common v0.65.1-0.20250711183725-0e1982f10d4c ## explicit; go 1.23.0 github.com/prometheus/common/config github.com/prometheus/common/expfmt @@ -1192,7 +1192,7 @@ github.com/prometheus/otlptranslator github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v1.99.0 => github.com/grafana/mimir-prometheus v1.8.2-0.20250717103207-acaa45ca4d40 +# 
github.com/prometheus/prometheus v1.99.0 => github.com/grafana/mimir-prometheus v1.8.2-0.20250725123259-c4bd4faba234 ## explicit; go 1.23.0 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -1798,7 +1798,7 @@ golang.org/x/tools/internal/stdlib golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions -# google.golang.org/api v0.238.0 +# google.golang.org/api v0.239.0 ## explicit; go 1.23.0 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport @@ -2112,13 +2112,13 @@ sigs.k8s.io/kustomize/kyaml/yaml/walk sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 sigs.k8s.io/yaml/goyaml.v3 -# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v1.8.2-0.20250717103207-acaa45ca4d40 -# github.com/prometheus/alertmanager => github.com/juliusmh/alertmanager v0.26.1-0.20250624114102-96969065f8d2 +# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v1.8.2-0.20250725123259-c4bd4faba234 # github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20250428154222-f7d51a6f6700 # gopkg.in/yaml.v3 => github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094 # github.com/grafana/regexp => github.com/grafana/regexp v0.0.0-20240531075221-3685f1377d7b # github.com/munnerz/goautoneg => github.com/grafana/goautoneg v0.0.0-20240607115440-f335c04c58ce # github.com/opentracing-contrib/go-stdlib => github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956 # github.com/opentracing-contrib/go-grpc => github.com/charleskorn/go-grpc v0.0.0-20231024023642-e9298576254f +# github.com/prometheus/alertmanager => github.com/grafana/prometheus-alertmanager v0.25.1-0.20250722103749-329f0c4df1ba # github.com/prometheus/otlptranslator => github.com/grafana/mimir-otlptranslator v0.0.0-20250703083430-c31a9568ad96 # github.com/thanos-io/objstore => github.com/charleskorn/objstore v0.0.0-20250527065533-21d4c0c463eb From 
f47e6c9f95b87f6c152d8ba536bba28b55154392 Mon Sep 17 00:00:00 2001 From: Julius Hinze Date: Wed, 23 Jul 2025 12:19:56 +0200 Subject: [PATCH 06/10] streamingpromql: always use UTF8Validation --- .../querysharding_test_utils_test.go | 2 +- .../request_validation_test.go | 9 +- pkg/mimir/modules.go | 6 +- pkg/querier/querier.go | 22 +---- .../benchmarks/comparison_test.go | 7 +- .../compat/name_validating_engine.go | 88 ------------------- pkg/streamingpromql/engine.go | 11 +-- .../engine_concurrency_test.go | 3 +- pkg/streamingpromql/engine_test.go | 63 +++++++------ pkg/streamingpromql/functions_test.go | 3 +- .../aggregations/aggregation_test.go | 12 +-- .../operators/aggregations/count_values.go | 6 +- .../aggregations/count_values_test.go | 23 +---- .../operators/functions/factories.go | 50 +++++------ .../operators/functions/label.go | 12 +-- pkg/streamingpromql/planning.go | 3 +- .../planning/core/aggregate_expression.go | 11 +-- .../planning/core/function_call.go | 2 +- pkg/streamingpromql/planning/plan.go | 2 - pkg/streamingpromql/query.go | 10 +-- .../main.go | 3 +- 21 files changed, 86 insertions(+), 262 deletions(-) delete mode 100644 pkg/streamingpromql/compat/name_validating_engine.go diff --git a/pkg/frontend/querymiddleware/querysharding_test_utils_test.go b/pkg/frontend/querymiddleware/querysharding_test_utils_test.go index 0e55f1c375e..543148ce81a 100644 --- a/pkg/frontend/querymiddleware/querysharding_test_utils_test.go +++ b/pkg/frontend/querymiddleware/querysharding_test_utils_test.go @@ -333,7 +333,7 @@ func newEngineForTesting(t *testing.T, engine string, opts ...engineOpt) (promql case querier.PrometheusEngine: return promOpts, promql.NewEngine(promOpts) case querier.MimirEngine: - limits := streamingpromql.NewStaticQueryLimitsProvider(0, model.UTF8Validation) + limits := streamingpromql.NewStaticQueryLimitsProvider(0) metrics := stats.NewQueryMetrics(promOpts.Reg) planner := streamingpromql.NewQueryPlanner(mqeOpts) logger := log.NewNopLogger() 
diff --git a/pkg/frontend/querymiddleware/request_validation_test.go b/pkg/frontend/querymiddleware/request_validation_test.go index 6d10251f979..882fe65cea6 100644 --- a/pkg/frontend/querymiddleware/request_validation_test.go +++ b/pkg/frontend/querymiddleware/request_validation_test.go @@ -208,9 +208,12 @@ func TestCardinalityQueryRequestValidationRoundTripper(t *testing.T) { expectedErrType: apierror.TypeBadData, }, { - // TODO: Check - // non-legacy label name will be accepted - url: cardinalityLabelValuesPathSuffix + "?label_names[]=\\xbd\\xb2\\x3d\\xbc\\x20\\xe2\\x8c\\x98", + // non-utf8 label name will be rejected even when we transition to UTF-8 label names + url: cardinalityLabelValuesPathSuffix + "?label_names[]=\xbd\xb2\x3d\xbc\x20\xe2\x8c\x98", + expectedErrType: apierror.TypeBadData, + }, + { + url: cardinalityLabelValuesPathSuffix + "?label_names[]=some.label", expectedErrType: "", }, { diff --git a/pkg/mimir/modules.go b/pkg/mimir/modules.go index 0920a66e8ee..7d732e165d6 100644 --- a/pkg/mimir/modules.go +++ b/pkg/mimir/modules.go @@ -28,7 +28,6 @@ import ( "github.com/prometheus/alertmanager/matchers/compat" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" - "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/rules" @@ -806,10 +805,9 @@ func (t *Mimir) initQueryFrontendTripperware() (serv services.Service, err error var eng promql.QueryEngine switch t.Cfg.Frontend.QueryEngine { case querier.PrometheusEngine: - // TODO: Decide whether this is a good idea. 
- eng = streamingpromqlcompat.NameValidatingEngine(promql.NewEngine(promOpts), t.Overrides) + eng = promql.NewEngine(promOpts) case querier.MimirEngine: - streamingEngine, err := streamingpromql.NewEngine(mqeOpts, streamingpromql.NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(mqeOpts.CommonOpts.Reg), t.QueryPlanner, util_log.Logger) + streamingEngine, err := streamingpromql.NewEngine(mqeOpts, streamingpromql.NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(mqeOpts.CommonOpts.Reg), t.QueryPlanner, util_log.Logger) if err != nil { return nil, fmt.Errorf("unable to create Mimir Query Engine: %w", err) } diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index ed838a52714..b4d46f8b634 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -185,8 +185,7 @@ func New(cfg Config, limits *validation.Overrides, distributor Distributor, quer switch cfg.QueryEngine { case PrometheusEngine: - // TODO: Check whether this approach is a good idea. - eng = compat.NameValidatingEngine(promql.NewEngine(opts), limits) + eng = promql.NewEngine(opts) case MimirEngine: limitsProvider := NewTenantQueryLimitsProvider(limits) streamingEngine, err := streamingpromql.NewEngine(mqeOpts, limitsProvider, queryMetrics, planner, logger) @@ -195,8 +194,7 @@ func New(cfg Config, limits *validation.Overrides, distributor Distributor, quer } if cfg.EnableQueryEngineFallback { - // TODO: Check whether this approach is a good idea. - prometheusEngine := compat.NameValidatingEngine(promql.NewEngine(opts), limits) + prometheusEngine := promql.NewEngine(opts) eng = compat.NewEngineWithFallback(streamingEngine, prometheusEngine, reg, logger) } else { eng = streamingEngine @@ -795,19 +793,3 @@ func (p *TenantQueryLimitsProvider) GetMaxEstimatedMemoryConsumptionPerQuery(ctx return totalLimit, nil } - -// GetValidationScheme computes the validation scheme for tenants injected into ctx. 
Returns LegacyValidation if -// at least one tenant uses LegacyValidation, UTF8Validation otherwise. -func (p *TenantQueryLimitsProvider) GetValidationScheme(ctx context.Context) (model.ValidationScheme, error) { - tenantIDs, err := tenant.TenantIDs(ctx) - if err != nil { - return 0, err - } - for _, tenantID := range tenantIDs { - validationScheme := p.limits.ValidationScheme(tenantID) - if validationScheme == model.LegacyValidation { - return validationScheme, nil - } - } - return model.UTF8Validation, nil -} diff --git a/pkg/streamingpromql/benchmarks/comparison_test.go b/pkg/streamingpromql/benchmarks/comparison_test.go index 979d8b38adc..2cfb59927fc 100644 --- a/pkg/streamingpromql/benchmarks/comparison_test.go +++ b/pkg/streamingpromql/benchmarks/comparison_test.go @@ -20,7 +20,6 @@ import ( "github.com/grafana/dskit/services" "github.com/grafana/dskit/test" "github.com/grafana/dskit/user" - "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" @@ -44,7 +43,7 @@ func BenchmarkQuery(b *testing.B) { opts := streamingpromql.NewTestEngineOpts() prometheusEngine := promql.NewEngine(opts.CommonOpts) - mimirEngine, err := streamingpromql.NewEngine(opts, streamingpromql.NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), streamingpromql.NewQueryPlanner(opts), log.NewNopLogger()) + mimirEngine, err := streamingpromql.NewEngine(opts, streamingpromql.NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), streamingpromql.NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(b, err) // Important: the names below must remain in sync with the names used in tools/benchmark-query-engine. 
@@ -96,7 +95,7 @@ func TestBothEnginesReturnSameResultsForBenchmarkQueries(t *testing.T) { opts := streamingpromql.NewTestEngineOpts() prometheusEngine := promql.NewEngine(opts.CommonOpts) - limitsProvider := streamingpromql.NewStaticQueryLimitsProvider(0, model.UTF8Validation) + limitsProvider := streamingpromql.NewStaticQueryLimitsProvider(0) queryMetrics := stats.NewQueryMetrics(nil) mimirEngine, err := streamingpromql.NewEngine(opts, limitsProvider, queryMetrics, streamingpromql.NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) @@ -125,7 +124,7 @@ func TestBenchmarkSetup(t *testing.T) { q := createBenchmarkQueryable(t, []int{1}) opts := streamingpromql.NewTestEngineOpts() - mimirEngine, err := streamingpromql.NewEngine(opts, streamingpromql.NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), streamingpromql.NewQueryPlanner(opts), log.NewNopLogger()) + mimirEngine, err := streamingpromql.NewEngine(opts, streamingpromql.NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), streamingpromql.NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) ctx := user.InjectOrgID(context.Background(), UserID) diff --git a/pkg/streamingpromql/compat/name_validating_engine.go b/pkg/streamingpromql/compat/name_validating_engine.go deleted file mode 100644 index c1acdf190c9..00000000000 --- a/pkg/streamingpromql/compat/name_validating_engine.go +++ /dev/null @@ -1,88 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only - -package compat - -import ( - "context" - "time" - - "github.com/grafana/dskit/tenant" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/promql" - "github.com/prometheus/prometheus/storage" - - "github.com/grafana/mimir/pkg/util/validation" -) - -type nameValidatingEngine struct { - engine promql.QueryEngine - limits *validation.Overrides -} - -// NameValidatingEngine creates a new promql.QueryEngine that wraps engine and overrides query options -// with the name validation 
scheme from limits. -func NameValidatingEngine(engine promql.QueryEngine, limits *validation.Overrides) promql.QueryEngine { - return &nameValidatingEngine{engine: engine, limits: limits} -} - -type optsWithValidationScheme struct { - promql.QueryOpts - validationScheme model.ValidationScheme -} - -func (o optsWithValidationScheme) EnablePerStepStats() bool { - return o.QueryOpts.EnablePerStepStats() -} - -func (o optsWithValidationScheme) LookbackDelta() time.Duration { - return o.QueryOpts.LookbackDelta() -} - -func (o optsWithValidationScheme) ValidationScheme() model.ValidationScheme { - return o.validationScheme -} - -func (e nameValidatingEngine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) { - validationScheme, err := e.getValidationScheme(ctx) - if err != nil { - return nil, err - } - if opts == nil { - opts = promql.NewPrometheusQueryOpts(false, 0, model.UTF8Validation) - } - opts = &optsWithValidationScheme{ - QueryOpts: opts, - validationScheme: validationScheme, - } - return e.engine.NewInstantQuery(ctx, q, opts, qs, ts) -} - -func (e nameValidatingEngine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) { - validationScheme, err := e.getValidationScheme(ctx) - if err != nil { - return nil, err - } - if opts == nil { - opts = promql.NewPrometheusQueryOpts(false, 0, model.UTF8Validation) - } - opts = &optsWithValidationScheme{ - QueryOpts: opts, - validationScheme: validationScheme, - } - return e.engine.NewRangeQuery(ctx, q, opts, qs, start, end, interval) -} - -// getValidationScheme retrieves the name validation scheme to use from a context containing tenant IDs. -// Returns legacy validation scheme if at least one tenant uses legacy validation. 
-func (e nameValidatingEngine) getValidationScheme(ctx context.Context) (model.ValidationScheme, error) { - tenantIDs, err := tenant.TenantIDs(ctx) - if err != nil { - return model.UnsetValidation, err - } - for _, tenantID := range tenantIDs { - if e.limits.ValidationScheme(tenantID) == model.LegacyValidation { - return model.LegacyValidation, nil - } - } - return model.UTF8Validation, nil -} diff --git a/pkg/streamingpromql/engine.go b/pkg/streamingpromql/engine.go index 998e99ef560..958511dcb01 100644 --- a/pkg/streamingpromql/engine.go +++ b/pkg/streamingpromql/engine.go @@ -15,7 +15,6 @@ import ( "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/common/model" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" "go.opentelemetry.io/otel" @@ -144,33 +143,25 @@ func (e *Engine) newQueryFromPlanner(ctx context.Context, q storage.Queryable, o type QueryLimitsProvider interface { // GetMaxEstimatedMemoryConsumptionPerQuery returns the maximum estimated memory allowed to be consumed by a query in bytes, or 0 to disable the limit. GetMaxEstimatedMemoryConsumptionPerQuery(ctx context.Context) (uint64, error) - // GetValidationScheme returns the label/metric name validation scheme to use for a query. - GetValidationScheme(ctx context.Context) (model.ValidationScheme, error) } // NewStaticQueryLimitsProvider returns a QueryLimitsProvider that always returns the provided limits. // // This should generally only be used in tests. 
-func NewStaticQueryLimitsProvider(maxEstimatedMemoryConsumptionPerQuery uint64, validationScheme model.ValidationScheme) QueryLimitsProvider { +func NewStaticQueryLimitsProvider(maxEstimatedMemoryConsumptionPerQuery uint64) QueryLimitsProvider { return staticQueryLimitsProvider{ maxEstimatedMemoryConsumptionPerQuery: maxEstimatedMemoryConsumptionPerQuery, - validationScheme: validationScheme, } } type staticQueryLimitsProvider struct { maxEstimatedMemoryConsumptionPerQuery uint64 - validationScheme model.ValidationScheme } func (p staticQueryLimitsProvider) GetMaxEstimatedMemoryConsumptionPerQuery(_ context.Context) (uint64, error) { return p.maxEstimatedMemoryConsumptionPerQuery, nil } -func (p staticQueryLimitsProvider) GetValidationScheme(_ context.Context) (model.ValidationScheme, error) { - return p.validationScheme, nil -} - type NoopQueryTracker struct{} func (n *NoopQueryTracker) GetMaxConcurrent() int { diff --git a/pkg/streamingpromql/engine_concurrency_test.go b/pkg/streamingpromql/engine_concurrency_test.go index 8430fc68efc..2ba1abdcd95 100644 --- a/pkg/streamingpromql/engine_concurrency_test.go +++ b/pkg/streamingpromql/engine_concurrency_test.go @@ -10,7 +10,6 @@ import ( "time" "github.com/go-kit/log" - "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/promqltest" @@ -188,7 +187,7 @@ func TestConcurrentQueries(t *testing.T) { t.Cleanup(func() { require.NoError(t, storage.Close()) }) opts := NewTestEngineOpts() - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) // Populate the expected result for each query. 
diff --git a/pkg/streamingpromql/engine_test.go b/pkg/streamingpromql/engine_test.go index 315ed8b3068..f12ad73e7e6 100644 --- a/pkg/streamingpromql/engine_test.go +++ b/pkg/streamingpromql/engine_test.go @@ -22,7 +22,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" @@ -95,7 +94,7 @@ func requireQueryIsUnsupported(t *testing.T, expression string, expectedError st func requireRangeQueryIsUnsupported(t *testing.T, expression string, expectedError string) { opts := NewTestEngineOpts() - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) qry, err := engine.NewRangeQuery(context.Background(), nil, nil, expression, time.Now().Add(-time.Hour), time.Now(), time.Minute) @@ -106,7 +105,7 @@ func requireRangeQueryIsUnsupported(t *testing.T, expression string, expectedErr func requireInstantQueryIsUnsupported(t *testing.T, expression string, expectedError string) { opts := NewTestEngineOpts() - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) qry, err := engine.NewInstantQuery(context.Background(), nil, nil, expression, time.Now()) @@ -118,7 +117,7 @@ func requireInstantQueryIsUnsupported(t *testing.T, expression string, expectedE func 
TestNewRangeQuery_InvalidQueryTime(t *testing.T) { opts := NewTestEngineOpts() - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) ctx := context.Background() @@ -132,7 +131,7 @@ func TestNewRangeQuery_InvalidQueryTime(t *testing.T) { func TestNewRangeQuery_InvalidExpressionTypes(t *testing.T) { opts := NewTestEngineOpts() - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) ctx := context.Background() @@ -148,7 +147,7 @@ func TestNewInstantQuery_Strings(t *testing.T) { opts := NewTestEngineOpts() prometheusEngine := promql.NewEngine(opts.CommonOpts) - mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) storage := promqltest.LoadedStorage(t, ``) @@ -172,7 +171,7 @@ func TestNewInstantQuery_Strings(t *testing.T) { // Once the streaming engine supports all PromQL features exercised by Prometheus' test cases, we can remove these files and instead call promql.RunBuiltinTests here instead. 
func TestUpstreamTestCases(t *testing.T) { opts := NewTestEngineOpts() - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) testdataFS := os.DirFS("./testdata") @@ -196,7 +195,7 @@ func TestUpstreamTestCases(t *testing.T) { func TestOurTestCases(t *testing.T) { opts := NewTestEngineOpts() - mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) prometheusEngine := promql.NewEngine(opts.CommonOpts) @@ -239,7 +238,7 @@ func TestOurTestCases(t *testing.T) { func TestRangeVectorSelectors(t *testing.T) { opts := NewTestEngineOpts() prometheusEngine := promql.NewEngine(opts.CommonOpts) - mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) baseT := timestamp.Time(0) @@ -785,7 +784,7 @@ func TestSubqueries(t *testing.T) { opts := NewTestEngineOpts() opts.CommonOpts.EnablePerStepStats = true prometheusEngine := promql.NewEngine(opts.CommonOpts) - mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) 
storage := promqltest.LoadedStorage(t, data) @@ -1209,7 +1208,7 @@ func TestSubqueries(t *testing.T) { func TestQueryCancellation(t *testing.T) { opts := NewTestEngineOpts() - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) // Simulate the query being cancelled by another goroutine by waiting for the Select() call to be made, @@ -1237,7 +1236,7 @@ func TestQueryCancellation(t *testing.T) { func TestQueryTimeout(t *testing.T) { opts := NewTestEngineOpts() opts.CommonOpts.Timeout = 20 * time.Millisecond - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) // Simulate the query doing some work and check that the query context has been cancelled. 
@@ -1303,7 +1302,7 @@ func (w cancellationQuerier) waitForCancellation(ctx context.Context) error { func TestQueryContextCancelledOnceQueryFinished(t *testing.T) { opts := NewTestEngineOpts() - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) storage := promqltest.LoadedStorage(t, ` @@ -1513,7 +1512,7 @@ func TestMemoryConsumptionLimit_SingleQueries(t *testing.T) { opts := NewTestEngineOpts() opts.CommonOpts.Reg = reg - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(limit, model.UTF8Validation), stats.NewQueryMetrics(reg), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(limit), stats.NewQueryMetrics(reg), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) spanExporter.Reset() @@ -1632,7 +1631,7 @@ func TestMemoryConsumptionLimit_MultipleQueries(t *testing.T) { opts.CommonOpts.Reg = reg limit := 32*types.FPointSize + 4*types.SeriesMetadataSize + 3*uint64(labels.FromStrings(labels.MetricName, "some_metric", "idx", "i").ByteSize()) - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(limit, model.UTF8Validation), stats.NewQueryMetrics(reg), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(limit), stats.NewQueryMetrics(reg), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) runQuery := func(expr string, shouldSucceed bool) { @@ -1704,7 +1703,7 @@ func TestActiveQueryTracker_SuccessfulQuery(t *testing.T) { opts.CommonOpts.ActiveQueryTracker = tracker planner := NewQueryPlanner(opts) - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), planner, log.NewNopLogger()) + engine, err := 
NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), planner, log.NewNopLogger()) require.NoError(t, err) testActiveQueryTracker( @@ -1765,7 +1764,7 @@ func TestActiveQueryTracker_FailedQuery(t *testing.T) { opts := NewTestEngineOpts() tracker := &testQueryTracker{} opts.CommonOpts.ActiveQueryTracker = tracker - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) innerStorage := promqltest.LoadedStorage(t, "") @@ -1856,7 +1855,7 @@ func TestActiveQueryTracker_WaitingForTrackerIncludesQueryTimeout(t *testing.T) opts := NewTestEngineOpts() opts.CommonOpts.Timeout = 10 * time.Millisecond opts.CommonOpts.ActiveQueryTracker = tracker - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) queryTypes := map[string]func() (promql.Query, error){ @@ -1933,7 +1932,7 @@ func runAnnotationTests(t *testing.T, testCases map[string]annotationTestCase) { endT := startT.Add(2 * step) opts := NewTestEngineOpts() - mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) prometheusEngine := promql.NewEngine(opts.CommonOpts) @@ -2948,7 +2947,7 @@ func runMixedMetricsTests(t *testing.T, expressions []string, pointsPerSeries in // - Look backs opts := NewTestEngineOpts() - 
mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) prometheusEngine := promql.NewEngine(opts.CommonOpts) @@ -3186,7 +3185,7 @@ func TestCompareVariousMixedMetricsComparisonOps(t *testing.T) { func TestQueryStats(t *testing.T) { opts := NewTestEngineOpts() opts.CommonOpts.EnablePerStepStats = true - mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) prometheusEngine := promql.NewEngine(opts.CommonOpts) @@ -3214,7 +3213,7 @@ func TestQueryStats(t *testing.T) { runQueryAndGetSamplesStats := func(t *testing.T, engine promql.QueryEngine, expr string, isInstantQuery bool) *promstats.QuerySamples { var q promql.Query var err error - opts := promql.NewPrometheusQueryOpts(true, 0, model.LegacyValidation) + opts := promql.NewPrometheusQueryOpts(true, 0) if isInstantQuery { q, err = engine.NewInstantQuery(context.Background(), storage, opts, expr, end) } else { @@ -3490,7 +3489,7 @@ func TestQueryStatsUpstreamTestCases(t *testing.T) { // TestCases are taken from Prometheus' TestQueryStatistics. 
opts := NewTestEngineOpts() opts.CommonOpts.EnablePerStepStats = true - mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) prometheusEngine := promql.NewEngine(opts.CommonOpts) @@ -3508,7 +3507,7 @@ func TestQueryStatsUpstreamTestCases(t *testing.T) { runQueryAndGetSamplesStats := func(t *testing.T, engine promql.QueryEngine, expr string, start, end time.Time, interval time.Duration) *promstats.QuerySamples { var q promql.Query var err error - opts := promql.NewPrometheusQueryOpts(true, 0, model.LegacyValidation) + opts := promql.NewPrometheusQueryOpts(true, 0) if interval == 0 { // Instant query @@ -3872,7 +3871,7 @@ func TestQueryStatsUpstreamTestCases(t *testing.T) { } func TestQueryStatementLookbackDelta(t *testing.T) { - limitsProvider := NewStaticQueryLimitsProvider(0, model.UTF8Validation) + limitsProvider := NewStaticQueryLimitsProvider(0) stats := stats.NewQueryMetrics(nil) logger := log.NewNopLogger() @@ -3889,7 +3888,7 @@ func TestQueryStatementLookbackDelta(t *testing.T) { require.NoError(t, err) t.Run("lookback delta not set in query options", func(t *testing.T) { - queryOpts := promql.NewPrometheusQueryOpts(false, 0, model.LegacyValidation) + queryOpts := promql.NewPrometheusQueryOpts(false, 0) runTest(t, engine, queryOpts, defaultLookbackDelta) }) @@ -3898,7 +3897,7 @@ func TestQueryStatementLookbackDelta(t *testing.T) { }) t.Run("lookback delta set in query options", func(t *testing.T) { - queryOpts := promql.NewPrometheusQueryOpts(false, 14*time.Minute, model.LegacyValidation) + queryOpts := promql.NewPrometheusQueryOpts(false, 14*time.Minute) runTest(t, engine, queryOpts, 14*time.Minute) }) }) @@ -3910,7 +3909,7 @@ func TestQueryStatementLookbackDelta(t *testing.T) { 
require.NoError(t, err) t.Run("lookback delta not set in query options", func(t *testing.T) { - queryOpts := promql.NewPrometheusQueryOpts(false, 0, model.LegacyValidation) + queryOpts := promql.NewPrometheusQueryOpts(false, 0) runTest(t, engine, queryOpts, 12*time.Minute) }) @@ -3919,7 +3918,7 @@ func TestQueryStatementLookbackDelta(t *testing.T) { }) t.Run("lookback delta set in query options", func(t *testing.T) { - queryOpts := promql.NewPrometheusQueryOpts(false, 14*time.Minute, model.LegacyValidation) + queryOpts := promql.NewPrometheusQueryOpts(false, 14*time.Minute) runTest(t, engine, queryOpts, 14*time.Minute) }) }) @@ -3939,7 +3938,7 @@ func TestQueryClose(t *testing.T) { t.Cleanup(func() { require.NoError(t, storage.Close()) }) opts := NewTestEngineOpts() - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) start := timestamp.Time(0) @@ -3971,7 +3970,7 @@ func TestEagerLoadSelectors(t *testing.T) { t.Cleanup(func() { require.NoError(t, storage.Close()) }) - limitsProvider := NewStaticQueryLimitsProvider(0, model.UTF8Validation) + limitsProvider := NewStaticQueryLimitsProvider(0) metrics := stats.NewQueryMetrics(nil) logger := log.NewNopLogger() optsWithoutEagerLoading := NewTestEngineOpts() @@ -4096,7 +4095,7 @@ func TestInstantQueryDurationExpression(t *testing.T) { opts := NewTestEngineOpts() prometheusEngine := promql.NewEngine(opts.CommonOpts) - mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) ctx := 
context.Background() diff --git a/pkg/streamingpromql/functions_test.go b/pkg/streamingpromql/functions_test.go index 4d63430d312..1585b91d1fc 100644 --- a/pkg/streamingpromql/functions_test.go +++ b/pkg/streamingpromql/functions_test.go @@ -8,7 +8,6 @@ import ( "time" "github.com/go-kit/log" - "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/promqltest" @@ -30,7 +29,7 @@ func TestFunctionDeduplicateAndMerge(t *testing.T) { storage := promqltest.LoadedStorage(t, data) opts := NewTestEngineOpts() - engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), NewQueryPlanner(opts), log.NewNopLogger()) require.NoError(t, err) ctx := context.Background() diff --git a/pkg/streamingpromql/operators/aggregations/aggregation_test.go b/pkg/streamingpromql/operators/aggregations/aggregation_test.go index 882f7306f61..f26cb689b28 100644 --- a/pkg/streamingpromql/operators/aggregations/aggregation_test.go +++ b/pkg/streamingpromql/operators/aggregations/aggregation_test.go @@ -7,7 +7,6 @@ import ( "testing" "time" - "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" @@ -338,16 +337,7 @@ func TestAggregations_ReturnIncompleteGroupsOnEarlyClose(t *testing.T) { "count_values": { createOperator: func(inner types.InstantVectorOperator, queryTimeRange types.QueryTimeRange, memoryConsumptionTracker *limiter.MemoryConsumptionTracker) (types.InstantVectorOperator, error) { labelName := operators.NewStringLiteral("value", posrange.PositionRange{}) - return NewCountValues( - inner, - labelName, - queryTimeRange, - []string{"group"}, - false, 
- memoryConsumptionTracker, - posrange.PositionRange{}, - model.LegacyValidation, - ), nil + return NewCountValues(inner, labelName, queryTimeRange, []string{"group"}, false, memoryConsumptionTracker, posrange.PositionRange{}), nil }, instant: true, allowExpectedSeriesInAnyOrder: true, diff --git a/pkg/streamingpromql/operators/aggregations/count_values.go b/pkg/streamingpromql/operators/aggregations/count_values.go index 7900e7d5c96..65e85bafb0f 100644 --- a/pkg/streamingpromql/operators/aggregations/count_values.go +++ b/pkg/streamingpromql/operators/aggregations/count_values.go @@ -39,8 +39,6 @@ type CountValues struct { labelsBuilder *labels.Builder labelsBytesBuffer []byte valueBuffer []byte - - nameValidationScheme model.ValidationScheme } var _ types.InstantVectorOperator = &CountValues{} @@ -53,7 +51,6 @@ func NewCountValues( without bool, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, expressionPosition posrange.PositionRange, - nameValidationScheme model.ValidationScheme, ) *CountValues { if without { grouping = append(grouping, labels.MetricName) @@ -69,7 +66,6 @@ func NewCountValues( Without: without, MemoryConsumptionTracker: memoryConsumptionTracker, expressionPosition: expressionPosition, - nameValidationScheme: nameValidationScheme, } } @@ -158,7 +154,7 @@ func (c *CountValues) SeriesMetadata(ctx context.Context) ([]types.SeriesMetadat func (c *CountValues) loadLabelName() error { c.resolvedLabelName = c.LabelName.GetValue() - if !labels.IsValidLabelName(c.resolvedLabelName, c.nameValidationScheme) { + if !model.LabelName(c.resolvedLabelName).IsValid() { return fmt.Errorf("invalid label name %q", c.resolvedLabelName) } diff --git a/pkg/streamingpromql/operators/aggregations/count_values_test.go b/pkg/streamingpromql/operators/aggregations/count_values_test.go index 959b5d7564c..b6b13225e79 100644 --- a/pkg/streamingpromql/operators/aggregations/count_values_test.go +++ b/pkg/streamingpromql/operators/aggregations/count_values_test.go @@ 
-6,7 +6,6 @@ import ( "context" "testing" - "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql" @@ -50,11 +49,6 @@ func TestCountValues_GroupLabelling(t *testing.T) { inputSeries: labels.FromStrings(labels.MetricName, "my_metric", "env", "prod", "foo", "bar"), expectedOutputSeries: labels.FromStrings("env", "prod", "value", "123"), }, - "grouping with 'by', single utf8 grouping label, input does have grouping label": { - grouping: []string{"env😀"}, - inputSeries: labels.FromStrings(labels.MetricName, "my_metric", "env😀", "prod", "foo", "bar"), - expectedOutputSeries: labels.FromStrings("env😀", "prod", "value", "123"), - }, "grouping with 'by', multiple grouping labels, input has only metric name": { grouping: []string{"cluster", "env"}, inputSeries: labels.FromStrings(labels.MetricName, "my_metric"), @@ -115,12 +109,6 @@ func TestCountValues_GroupLabelling(t *testing.T) { inputSeries: labels.FromStrings(labels.MetricName, "my_metric", "env", "prod", "a-label", "a-value", "f-label", "f-value"), expectedOutputSeries: labels.FromStrings("a-label", "a-value", "f-label", "f-value", "value", "123"), }, - "grouping with 'without', single utf8 grouping label, input does have grouping label": { - grouping: []string{"env😀"}, - without: true, - inputSeries: labels.FromStrings(labels.MetricName, "my_metric", "env😀", "prod", "a-label", "a-value", "f-label", "f-value"), - expectedOutputSeries: labels.FromStrings("a-label", "a-value", "f-label", "f-value", "value", "123"), - }, "grouping with 'without', multiple grouping labels, input has only metric name": { grouping: []string{"cluster", "env"}, without: true, @@ -234,16 +222,7 @@ func TestCountValues_GroupLabelling(t *testing.T) { } labelName := operators.NewStringLiteral("value", posrange.PositionRange{}) - aggregator := NewCountValues( - inner, - labelName, - 
types.NewInstantQueryTimeRange(timestamp.Time(0)), - testCase.grouping, - testCase.without, - memoryConsumptionTracker, - posrange.PositionRange{}, - model.UTF8Validation, - ) + aggregator := NewCountValues(inner, labelName, types.NewInstantQueryTimeRange(timestamp.Time(0)), testCase.grouping, testCase.without, memoryConsumptionTracker, posrange.PositionRange{}) metadata, err := aggregator.SeriesMetadata(context.Background()) require.NoError(t, err) diff --git a/pkg/streamingpromql/operators/functions/factories.go b/pkg/streamingpromql/operators/functions/factories.go index 04b89bb114c..f595ccdb112 100644 --- a/pkg/streamingpromql/operators/functions/factories.go +++ b/pkg/streamingpromql/operators/functions/factories.go @@ -6,7 +6,6 @@ import ( "fmt" "math" - "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser/posrange" @@ -26,7 +25,6 @@ type FunctionOperatorFactory func( annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, - validationScheme model.ValidationScheme, ) (types.Operator, error) // SingleInputVectorFunctionOperatorFactory creates an InstantVectorFunctionOperatorFactory for functions @@ -36,7 +34,7 @@ type FunctionOperatorFactory func( // - name: The name of the function // - f: The function implementation func SingleInputVectorFunctionOperatorFactory(name string, f FunctionOverInstantVectorDefinition) FunctionOperatorFactory { - return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { + return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition 
posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { if len(args) != 1 { // Should be caught by the PromQL parser, but we check here for safety. return nil, fmt.Errorf("expected exactly 1 argument for %s, got %v", name, len(args)) @@ -79,7 +77,7 @@ func TimeTransformationFunctionOperatorFactory(name string, seriesDataFunc Insta SeriesMetadataFunction: DropSeriesName, } - return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { + return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { var inner types.InstantVectorOperator if len(args) == 0 { // if the argument is not provided, it will default to vector(time()) @@ -133,7 +131,7 @@ func FunctionOverRangeVectorOperatorFactory( name string, f FunctionOverRangeVectorDefinition, ) FunctionOperatorFactory { - return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { + return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { if len(args) != 1 { // Should be caught by the PromQL parser, but we check here for safety. 
return nil, fmt.Errorf("expected exactly 1 argument for %s, got %v", name, len(args)) @@ -155,7 +153,7 @@ func FunctionOverRangeVectorOperatorFactory( } } -func PredictLinearFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { +func PredictLinearFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { f := PredictLinear if len(args) != 2 { @@ -184,7 +182,7 @@ func PredictLinearFactory(args []types.Operator, _ labels.Labels, memoryConsumpt return o, nil } -func QuantileOverTimeFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { +func QuantileOverTimeFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { f := QuantileOverTime if len(args) != 2 { @@ -213,7 +211,7 @@ func QuantileOverTimeFactory(args []types.Operator, _ labels.Labels, memoryConsu return o, nil } -func scalarToInstantVectorOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, _ types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { +func scalarToInstantVectorOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker 
*limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, _ types.QueryTimeRange) (types.Operator, error) { if len(args) != 1 { // Should be caught by the PromQL parser, but we check here for safety. return nil, fmt.Errorf("expected exactly 1 argument for vector, got %v", len(args)) @@ -228,7 +226,7 @@ func scalarToInstantVectorOperatorFactory(args []types.Operator, _ labels.Labels return scalars.NewScalarToInstantVector(inner, expressionPosition, memoryConsumptionTracker), nil } -func LabelJoinFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, nameValidationScheme model.ValidationScheme) (types.Operator, error) { +func LabelJoinFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { // It is valid for label_join to have no source label names. ie, only 3 arguments are actually required. if len(args) < 3 { // Should be caught by the PromQL parser, but we check here for safety. 
@@ -266,7 +264,7 @@ func LabelJoinFunctionOperatorFactory(args []types.Operator, _ labels.Labels, me f := FunctionOverInstantVectorDefinition{ SeriesDataFunc: PassthroughData, SeriesMetadataFunction: SeriesMetadataFunctionDefinition{ - Func: LabelJoinFactory(dstLabel, separator, srcLabels, nameValidationScheme), + Func: LabelJoinFactory(dstLabel, separator, srcLabels), NeedsSeriesDeduplication: true, }, } @@ -276,7 +274,7 @@ func LabelJoinFunctionOperatorFactory(args []types.Operator, _ labels.Labels, me return operators.NewDeduplicateAndMerge(o, memoryConsumptionTracker), nil } -func LabelReplaceFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, validationScheme model.ValidationScheme) (types.Operator, error) { +func LabelReplaceFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { if len(args) != 5 { // Should be caught by the PromQL parser, but we check here for safety. 
return nil, fmt.Errorf("expected exactly 5 arguments for label_replace, got %v", len(args)) @@ -315,7 +313,7 @@ func LabelReplaceFunctionOperatorFactory(args []types.Operator, _ labels.Labels, f := FunctionOverInstantVectorDefinition{ SeriesDataFunc: PassthroughData, SeriesMetadataFunction: SeriesMetadataFunctionDefinition{ - Func: LabelReplaceFactory(dstLabel, replacement, srcLabel, regex, validationScheme), + Func: LabelReplaceFactory(dstLabel, replacement, srcLabel, regex), NeedsSeriesDeduplication: true, }, } @@ -325,7 +323,7 @@ func LabelReplaceFunctionOperatorFactory(args []types.Operator, _ labels.Labels, return operators.NewDeduplicateAndMerge(o, memoryConsumptionTracker), nil } -func AbsentOperatorFactory(args []types.Operator, labels labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { +func AbsentOperatorFactory(args []types.Operator, labels labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { if len(args) != 1 { return nil, fmt.Errorf("expected exactly 1 parameter for 'absent', got %v", len(args)) } @@ -338,7 +336,7 @@ func AbsentOperatorFactory(args []types.Operator, labels labels.Labels, memoryCo return NewAbsent(inner, labels, timeRange, memoryConsumptionTracker, expressionPosition), nil } -func AbsentOverTimeOperatorFactory(args []types.Operator, labels labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { +func AbsentOverTimeOperatorFactory(args []types.Operator, labels labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ 
*annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { if len(args) != 1 { return nil, fmt.Errorf("expected exactly 1 parameter for 'absent_over_time', got %v", len(args)) } @@ -351,7 +349,7 @@ func AbsentOverTimeOperatorFactory(args []types.Operator, labels labels.Labels, return NewAbsentOverTime(inner, labels, timeRange, memoryConsumptionTracker, expressionPosition), nil } -func ClampFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { +func ClampFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { if len(args) != 3 { // Should be caught by the PromQL parser, but we check here for safety. return nil, fmt.Errorf("expected exactly 3 arguments for clamp, got %v", len(args)) @@ -385,7 +383,7 @@ func ClampFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memory } func ClampMinMaxFunctionOperatorFactory(functionName string, isMin bool) FunctionOperatorFactory { - return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { + return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { if len(args) != 2 { // Should be caught by the PromQL parser, but we check here for safety. 
return nil, fmt.Errorf("expected exactly 2 arguments for %s, got %v", functionName, len(args)) @@ -413,7 +411,7 @@ func ClampMinMaxFunctionOperatorFactory(functionName string, isMin bool) Functio } } -func RoundFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { +func RoundFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { if len(args) != 1 && len(args) != 2 { // Should be caught by the PromQL parser, but we check here for safety. return nil, fmt.Errorf("expected 1 or 2 arguments for round, got %v", len(args)) @@ -445,7 +443,7 @@ func RoundFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memory return operators.NewDeduplicateAndMerge(o, memoryConsumptionTracker), nil } -func HistogramQuantileFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { +func HistogramQuantileFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { if len(args) != 2 { // Should be caught by the PromQL parser, but we check here for safety. 
return nil, fmt.Errorf("expected exactly 2 arguments for histogram_quantile, got %v", len(args)) @@ -467,7 +465,7 @@ func HistogramQuantileFunctionOperatorFactory(args []types.Operator, _ labels.La return operators.NewDeduplicateAndMerge(o, memoryConsumptionTracker), nil } -func HistogramFractionFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { +func HistogramFractionFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { if len(args) != 3 { // Should be caught by the PromQL parser, but we check here for safety. return nil, fmt.Errorf("expected exactly 3 arguments for histogram_fraction, got %v", len(args)) @@ -495,7 +493,7 @@ func HistogramFractionFunctionOperatorFactory(args []types.Operator, _ labels.La return operators.NewDeduplicateAndMerge(o, memoryConsumptionTracker), nil } -func TimestampFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { +func TimestampFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { if len(args) != 1 { // Should be caught by the PromQL parser, but we check here for safety. 
return nil, fmt.Errorf("expected exactly 1 argument for timestamp, got %v", len(args)) @@ -525,7 +523,7 @@ func SortByLabelOperatorFactory(descending bool) FunctionOperatorFactory { functionName = "sort_by_label_desc" } - return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { + return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { if len(args) < 1 { // Should be caught by the PromQL parser, but we check here for safety. return nil, fmt.Errorf("expected at least 1 argument for %s, got %v", functionName, len(args)) @@ -566,7 +564,7 @@ func SortOperatorFactory(descending bool) FunctionOperatorFactory { functionName = "sort_desc" } - return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { + return func(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { if len(args) != 1 { // Should be caught by the PromQL parser, but we check here for safety. 
return nil, fmt.Errorf("expected exactly 1 argument for %s, got %v", functionName, len(args)) @@ -619,7 +617,7 @@ func RegisterFunction(function Function, name string, returnType parser.ValueTyp return nil } -func piOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { +func piOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { if len(args) != 0 { // Should be caught by the PromQL parser, but we check here for safety. return nil, fmt.Errorf("expected exactly 0 arguments for pi, got %v", len(args)) @@ -628,7 +626,7 @@ func piOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumption return scalars.NewScalarConstant(math.Pi, timeRange, memoryConsumptionTracker, expressionPosition), nil } -func timeOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { +func timeOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { if len(args) != 0 { // Should be caught by the PromQL parser, but we check here for safety. 
return nil, fmt.Errorf("expected exactly 0 arguments for time, got %v", len(args)) @@ -637,7 +635,7 @@ func timeOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumpti return operators.NewTime(timeRange, memoryConsumptionTracker, expressionPosition), nil } -func instantVectorToScalarOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { +func instantVectorToScalarOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, _ *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { if len(args) != 1 { // Should be caught by the PromQL parser, but we check here for safety. return nil, fmt.Errorf("expected exactly 1 argument for scalar, got %v", len(args)) @@ -662,7 +660,7 @@ func UnaryNegationOfInstantVectorOperatorFactory(inner types.InstantVectorOperat return operators.NewDeduplicateAndMerge(o, memoryConsumptionTracker) } -func DoubleExponentialSmoothingFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange, _ model.ValidationScheme) (types.Operator, error) { +func DoubleExponentialSmoothingFunctionOperatorFactory(args []types.Operator, _ labels.Labels, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, timeRange types.QueryTimeRange) (types.Operator, error) { f := DoubleExponentialSmoothing functionName := "double_exponential_smoothing" diff --git a/pkg/streamingpromql/operators/functions/label.go b/pkg/streamingpromql/operators/functions/label.go 
index 7ce5acad41c..1ec655a1ab2 100644 --- a/pkg/streamingpromql/operators/functions/label.go +++ b/pkg/streamingpromql/operators/functions/label.go @@ -18,18 +18,18 @@ import ( "github.com/grafana/mimir/pkg/util/limiter" ) -func LabelJoinFactory(dstLabelOp, separatorOp types.StringOperator, srcLabelOps []types.StringOperator, validationScheme model.ValidationScheme) SeriesMetadataFunction { +func LabelJoinFactory(dstLabelOp, separatorOp types.StringOperator, srcLabelOps []types.StringOperator) SeriesMetadataFunction { return func(seriesMetadata []types.SeriesMetadata, tracker *limiter.MemoryConsumptionTracker) ([]types.SeriesMetadata, error) { dst := dstLabelOp.GetValue() - if !labels.IsValidLabelName(dst, validationScheme) { + if !model.LabelName(dst).IsValid() { return nil, fmt.Errorf("invalid destination label name in label_join(): %s", dst) } separator := separatorOp.GetValue() srcLabels := make([]string, len(srcLabelOps)) for i, op := range srcLabelOps { src := op.GetValue() - if !labels.IsValidLabelName(src, validationScheme) { - return nil, fmt.Errorf("invalid source label name in label_join(): %s", src) + if !model.LabelName(src).IsValid() { + return nil, fmt.Errorf("invalid source label name in label_join(): %s", src) } srcLabels[i] = src } @@ -63,7 +63,7 @@ func LabelJoinFactory(dstLabelOp, separatorOp types.StringOperator, srcLabelOps } } -func LabelReplaceFactory(dstLabelOp, replacementOp, srcLabelOp, regexOp types.StringOperator, validationScheme model.ValidationScheme) SeriesMetadataFunction { +func LabelReplaceFactory(dstLabelOp, replacementOp, srcLabelOp, regexOp types.StringOperator) SeriesMetadataFunction { return func(seriesMetadata []types.SeriesMetadata, tracker *limiter.MemoryConsumptionTracker) ([]types.SeriesMetadata, error) { regexStr := regexOp.GetValue() regex, err := regexp.Compile("^(?s:" + regexStr + ")$") @@ -71,7 +71,7 @@ func LabelReplaceFactory(dstLabelOp, replacementOp, srcLabelOp, regexOp types.St return nil, fmt.Errorf("invalid 
regular expression in label_replace(): %s", regexStr) } dst := dstLabelOp.GetValue() - if !labels.IsValidLabelName(dst, validationScheme) { + if !model.LabelName(dst).IsValid() { return nil, fmt.Errorf("invalid destination label name in label_replace(): %s", dst) } repl := replacementOp.GetValue() diff --git a/pkg/streamingpromql/planning.go b/pkg/streamingpromql/planning.go index 7f87e4eea47..0300217a469 100644 --- a/pkg/streamingpromql/planning.go +++ b/pkg/streamingpromql/planning.go @@ -441,7 +441,7 @@ func findFunction(name string) (functions.Function, bool) { // Materialize converts a query plan into an executable query. func (e *Engine) Materialize(ctx context.Context, plan *planning.QueryPlan, queryable storage.Queryable, opts promql.QueryOpts) (promql.Query, error) { if opts == nil { - opts = promql.NewPrometheusQueryOpts(false, 0, model.UTF8Validation) + opts = promql.NewPrometheusQueryOpts(false, 0) } queryID, err := e.activeQueryTracker.Insert(ctx, plan.OriginalExpression+" # (materialization)") @@ -467,7 +467,6 @@ func (e *Engine) Materialize(ctx context.Context, plan *planning.QueryPlan, quer Annotations: q.annotations, LookbackDelta: q.lookbackDelta, EagerLoadSelectors: q.engine.eagerLoadSelectors, - NameValidationScheme: q.nameValidationScheme, } q.statement = &parser.EvalStmt{ diff --git a/pkg/streamingpromql/planning/core/aggregate_expression.go b/pkg/streamingpromql/planning/core/aggregate_expression.go index bd67756c425..d51cdc375d0 100644 --- a/pkg/streamingpromql/planning/core/aggregate_expression.go +++ b/pkg/streamingpromql/planning/core/aggregate_expression.go @@ -151,16 +151,7 @@ func (a *AggregateExpression) OperatorFactory(children []types.Operator, timeRan return nil, fmt.Errorf("expected StringOperator as parameter child of AggregateExpression with operation %s, got %T", a.Op.String(), children[0]) } - o = aggregations.NewCountValues( - inner, - param, - timeRange, - a.Grouping, - a.Without, - params.MemoryConsumptionTracker, - 
a.ExpressionPosition.ToPrometheusType(), - params.NameValidationScheme, - ) + o = aggregations.NewCountValues(inner, param, timeRange, a.Grouping, a.Without, params.MemoryConsumptionTracker, a.ExpressionPosition.ToPrometheusType()) default: if len(children) != 1 { diff --git a/pkg/streamingpromql/planning/core/function_call.go b/pkg/streamingpromql/planning/core/function_call.go index 382f8131a7d..d94bfd6bb96 100644 --- a/pkg/streamingpromql/planning/core/function_call.go +++ b/pkg/streamingpromql/planning/core/function_call.go @@ -94,7 +94,7 @@ func (f *FunctionCall) OperatorFactory(children []types.Operator, timeRange type absentLabels = mimirpb.FromLabelAdaptersToLabels(f.AbsentLabels) } - o, err := fnc.OperatorFactory(children, absentLabels, params.MemoryConsumptionTracker, params.Annotations, f.ExpressionPosition.ToPrometheusType(), timeRange, params.NameValidationScheme) + o, err := fnc.OperatorFactory(children, absentLabels, params.MemoryConsumptionTracker, params.Annotations, f.ExpressionPosition.ToPrometheusType(), timeRange) if err != nil { return nil, err } diff --git a/pkg/streamingpromql/planning/plan.go b/pkg/streamingpromql/planning/plan.go index 934321f2f41..8589dfd0def 100644 --- a/pkg/streamingpromql/planning/plan.go +++ b/pkg/streamingpromql/planning/plan.go @@ -9,7 +9,6 @@ import ( "time" "github.com/gogo/protobuf/proto" - "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" @@ -93,7 +92,6 @@ type OperatorParameters struct { Annotations *annotations.Annotations LookbackDelta time.Duration EagerLoadSelectors bool - NameValidationScheme model.ValidationScheme } func (p *QueryPlan) ToEncodedPlan(includeDescriptions bool, includeDetails bool) (*EncodedQueryPlan, error) { diff --git a/pkg/streamingpromql/query.go b/pkg/streamingpromql/query.go index 4a3bb74c83d..39860573499 100644 --- a/pkg/streamingpromql/query.go +++ 
b/pkg/streamingpromql/query.go @@ -14,7 +14,6 @@ import ( "github.com/go-kit/log/level" "github.com/grafana/dskit/cancellation" - "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" @@ -47,7 +46,6 @@ type Query struct { annotations *annotations.Annotations stats *types.QueryStats lookbackDelta time.Duration - nameValidationScheme model.ValidationScheme // Time range of the top-level query. // Subqueries may use a different range. @@ -61,7 +59,7 @@ type Query struct { func (e *Engine) newQuery(ctx context.Context, queryable storage.Queryable, opts promql.QueryOpts, timeRange types.QueryTimeRange, originalExpression string) (*Query, error) { if opts == nil { - opts = promql.NewPrometheusQueryOpts(false, 0, model.UTF8Validation) + opts = promql.NewPrometheusQueryOpts(false, 0) } lookbackDelta := opts.LookbackDelta() @@ -74,11 +72,6 @@ func (e *Engine) newQuery(ctx context.Context, queryable storage.Queryable, opts return nil, fmt.Errorf("could not get memory consumption limit for query: %w", err) } - validationScheme, err := e.limitsProvider.GetValidationScheme(ctx) - if err != nil { - return nil, fmt.Errorf("could not get validation scheme for query: %w", err) - } - memoryConsumptionTracker := limiter.NewMemoryConsumptionTracker(ctx, maxEstimatedMemoryConsumptionPerQuery, e.queriesRejectedDueToPeakMemoryConsumption, originalExpression) stats, err := types.NewQueryStats(timeRange, e.enablePerStepStats && opts.EnablePerStepStats(), memoryConsumptionTracker) if err != nil { @@ -93,7 +86,6 @@ func (e *Engine) newQuery(ctx context.Context, queryable storage.Queryable, opts topLevelQueryTimeRange: timeRange, lookbackDelta: lookbackDelta, originalExpression: originalExpression, - nameValidationScheme: validationScheme, } return q, nil diff --git a/tools/check-for-disabled-but-supported-mqe-test-cases/main.go 
b/tools/check-for-disabled-but-supported-mqe-test-cases/main.go index 64cf37af051..b59849f8493 100644 --- a/tools/check-for-disabled-but-supported-mqe-test-cases/main.go +++ b/tools/check-for-disabled-but-supported-mqe-test-cases/main.go @@ -16,7 +16,6 @@ import ( "github.com/go-kit/log" "github.com/grafana/regexp" - "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" @@ -59,7 +58,7 @@ func run() error { } opts := streamingpromql.NewTestEngineOpts() - engine, err := streamingpromql.NewEngine(opts, streamingpromql.NewStaticQueryLimitsProvider(0, model.UTF8Validation), stats.NewQueryMetrics(nil), streamingpromql.NewQueryPlanner(opts), log.NewNopLogger()) + engine, err := streamingpromql.NewEngine(opts, streamingpromql.NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), streamingpromql.NewQueryPlanner(opts), log.NewNopLogger()) if err != nil { return fmt.Errorf("could not create engine: %w", err) } From a600d4d3aad471754fd01ca5e43f073a8a9453db Mon Sep 17 00:00:00 2001 From: Julius Hinze Date: Wed, 23 Jul 2025 17:47:32 +0200 Subject: [PATCH 07/10] distributor: refactor legacy/utf validation tests --- pkg/distributor/validate_test.go | 226 ++++++++++++++++++------------- 1 file changed, 135 insertions(+), 91 deletions(-) diff --git a/pkg/distributor/validate_test.go b/pkg/distributor/validate_test.go index 4880476f811..3db8aef3646 100644 --- a/pkg/distributor/validate_test.go +++ b/pkg/distributor/validate_test.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "net/http" + "strconv" "strings" "testing" "time" @@ -29,6 +30,7 @@ import ( "github.com/grafana/mimir/pkg/costattribution" catestutils "github.com/grafana/mimir/pkg/costattribution/testutils" "github.com/grafana/mimir/pkg/mimirpb" + "github.com/grafana/mimir/pkg/util/globalerror" "github.com/grafana/mimir/pkg/util/validation" ) @@ -78,59 +80,72 @@ func TestValidateLabels(t *testing.T) { 
reg := prometheus.NewPedanticRegistry() s := newSampleValidationMetrics(reg) - const userID = "testUser" - - legacyConfig := validateLabelsCfg{ - maxLabelNamesPerSeries: 3, - maxLabelNamesPerInfoSeries: 4, - maxLabelNameLength: 25, - maxLabelValueLength: 25, - validationScheme: model.LegacyValidation, - } - - utf8Config := legacyConfig - utf8Config.validationScheme = model.UTF8Validation + var cfg validateLabelsCfg + userID := "testUser" + cfg.maxLabelValueLength = 25 + cfg.maxLabelNameLength = 25 + cfg.maxLabelNamesPerSeries = 3 + cfg.maxLabelNamesPerInfoSeries = 4 limits := catestutils.NewMockCostAttributionLimits(0, userID, "team") careg := prometheus.NewRegistry() manager, err := costattribution.NewManager(5*time.Second, 10*time.Second, log.NewNopLogger(), limits, reg, careg) require.NoError(t, err) cast := manager.SampleTracker(userID) - for _, c := range []struct { + validationSchemes := []model.ValidationScheme{ + model.LegacyValidation, + model.UTF8Validation, + } + + // alwaysErr ensures this error is returned for legacy and utf8 validation. + alwaysErr := func(err error) func(model.ValidationScheme) error { + return func(model.ValidationScheme) error { + return err + } + } + + // legacyErr ensures err is only returned when legacy validation scheme is used. 
+ legacyErr := func(err error) func(model.ValidationScheme) error { + return func(scheme model.ValidationScheme) error { + if scheme == model.LegacyValidation { + return err + } + return nil + } + } + + testCases := []struct { metric model.Metric skipLabelNameValidation bool skipLabelCountValidation bool - config validateLabelsCfg - err error + wantErr func(model.ValidationScheme) error }{ { metric: map[model.LabelName]model.LabelValue{"team": "a"}, skipLabelNameValidation: false, skipLabelCountValidation: false, - config: legacyConfig, - err: errors.New(noMetricNameMsgFormat), + wantErr: alwaysErr(errors.New(noMetricNameMsgFormat)), }, { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: " ", "team": "a"}, skipLabelNameValidation: false, skipLabelCountValidation: false, - config: legacyConfig, - err: fmt.Errorf(invalidMetricNameMsgFormat, " "), + wantErr: legacyErr(fmt.Errorf(invalidMetricNameMsgFormat, " ")), }, { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "metric_name_with_\xb0_invalid_utf8_\xb0", "team": "a"}, skipLabelNameValidation: false, skipLabelCountValidation: false, - config: legacyConfig, - err: fmt.Errorf(invalidMetricNameMsgFormat, "metric_name_with__invalid_utf8_ (non-ascii characters removed)"), + wantErr: alwaysErr( + fmt.Errorf(invalidMetricNameMsgFormat, "metric_name_with__invalid_utf8_ (non-ascii characters removed)"), + ), }, { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "valid", "foo ": "bar", "team": "a"}, skipLabelNameValidation: false, skipLabelCountValidation: false, - config: legacyConfig, - err: fmt.Errorf( + wantErr: legacyErr(fmt.Errorf( invalidLabelMsgFormat, "foo ", mimirpb.FromLabelAdaptersToString( @@ -140,21 +155,18 @@ func TestValidateLabels(t *testing.T) { {Name: "team", Value: "a"}, }, ), - ), + )), }, { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "valid", "team": "c"}, skipLabelNameValidation: false, skipLabelCountValidation: false, - 
config: legacyConfig, - err: nil, }, { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "badLabelName", "this_is_a_really_really_long_name_that_should_cause_an_error": "test_value_please_ignore", "team": "biz"}, skipLabelNameValidation: false, skipLabelCountValidation: false, - config: legacyConfig, - err: fmt.Errorf( + wantErr: alwaysErr(fmt.Errorf( labelNameTooLongMsgFormat, "this_is_a_really_really_long_name_that_should_cause_an_error", mimirpb.FromLabelAdaptersToString( @@ -164,14 +176,13 @@ func TestValidateLabels(t *testing.T) { {Name: "this_is_a_really_really_long_name_that_should_cause_an_error", Value: "test_value_please_ignore"}, }, ), - ), + )), }, { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "badLabelValue", "much_shorter_name": "test_value_please_ignore_no_really_nothing_to_see_here", "team": "biz"}, skipLabelNameValidation: false, skipLabelCountValidation: false, - config: legacyConfig, - err: LabelValueTooLongError{ + wantErr: alwaysErr(LabelValueTooLongError{ Label: mimirpb.LabelAdapter{Name: "much_shorter_name", Value: "test_value_please_ignore_no_really_nothing_to_see_here"}, Limit: 25, Series: []mimirpb.LabelAdapter{ @@ -179,14 +190,13 @@ func TestValidateLabels(t *testing.T) { {Name: "much_shorter_name", Value: "test_value_please_ignore_no_really_nothing_to_see_here"}, {Name: "team", Value: "biz"}, }, - }, + }), }, { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "foo", "bar": "baz", "blip": "blop", "team": "plof"}, skipLabelNameValidation: false, skipLabelCountValidation: false, - config: legacyConfig, - err: fmt.Errorf( + wantErr: alwaysErr(fmt.Errorf( tooManyLabelsMsgFormat, tooManyLabelsArgs( []mimirpb.LabelAdapter{ @@ -197,23 +207,20 @@ func TestValidateLabels(t *testing.T) { }, 3, )..., - ), + )), }, { // *_info metrics have higher label limits. 
metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "foo_info", "bar": "baz", "blip": "blop", "team": "a"}, skipLabelNameValidation: false, skipLabelCountValidation: false, - config: legacyConfig, - err: nil, }, { // *_info metrics have higher label limits. metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "foo_info", "bar": "baz", "blip": "blop", "blap": "blup", "team": "a"}, skipLabelNameValidation: false, skipLabelCountValidation: false, - config: legacyConfig, - err: fmt.Errorf( + wantErr: alwaysErr(fmt.Errorf( tooManyInfoLabelsMsgFormat, tooManyLabelsArgs( []mimirpb.LabelAdapter{ @@ -225,52 +232,42 @@ func TestValidateLabels(t *testing.T) { }, 4, )..., - ), + )), }, { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "foo", "bar": "baz", "blip": "blop", "team": "a"}, skipLabelNameValidation: false, skipLabelCountValidation: true, - config: legacyConfig, - err: nil, }, { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "foo", "invalid%label&name": "bar", "team": "biz"}, skipLabelNameValidation: true, skipLabelCountValidation: false, - config: legacyConfig, - err: nil, }, { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "foo", "label1": "你好", "team": "plof"}, skipLabelNameValidation: false, skipLabelCountValidation: false, - config: legacyConfig, - err: nil, }, { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "foo", "label1": "abc\xfe\xfddef", "team": "plof"}, skipLabelNameValidation: false, skipLabelCountValidation: false, - config: legacyConfig, - err: fmt.Errorf( + wantErr: alwaysErr(fmt.Errorf( invalidLabelValueMsgFormat, "label1", "abc\ufffddef", "foo", - ), + )), }, { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "foo", "label1": "abc\xfe\xfddef"}, skipLabelNameValidation: true, skipLabelCountValidation: false, - config: legacyConfig, - err: nil, }, { metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: 
"foo", "name😀": "value", "team": "b"}, skipLabelNameValidation: false, skipLabelCountValidation: false, - config: legacyConfig, - err: fmt.Errorf( + wantErr: legacyErr(fmt.Errorf( invalidLabelMsgFormat, "name😀", mimirpb.FromLabelAdaptersToString( @@ -280,57 +277,104 @@ func TestValidateLabels(t *testing.T) { {Name: "team", Value: "b"}, }, ), - ), + )), }, { - metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "foo", "name😀": "value", "team": "b"}, + metric: map[model.LabelName]model.LabelValue{model.MetricNameLabel: "name😀", "team": "b"}, skipLabelNameValidation: false, skipLabelCountValidation: false, - config: utf8Config, + wantErr: legacyErr(fmt.Errorf( + invalidMetricNameMsgFormat, "name (non-ascii characters removed)"), + ), }, - } { - err := validateLabels(s, c.config, userID, "custom label", mimirpb.FromMetricsToLabelAdapters(c.metric), c.skipLabelNameValidation, c.skipLabelCountValidation, cast, ts) - assert.Equal(t, c.err, err, "wrong error") + } + + for i, c := range testCases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + for _, scheme := range validationSchemes { + t.Run(scheme.String(), func(t *testing.T) { + testConfig := cfg + testConfig.validationScheme = scheme + var wantErr error + if c.wantErr != nil { + wantErr = c.wantErr(scheme) + } + err := validateLabels(s, testConfig, userID, "custom label", mimirpb.FromMetricsToLabelAdapters(c.metric), c.skipLabelNameValidation, c.skipLabelCountValidation, cast, ts) + assert.Equal(t, wantErr, err, "wrong error") + }) + } + }) + } + + discardedSamplesValues := map[string]map[string]int{} + for _, c := range testCases { + if c.wantErr == nil { + continue + } + for _, scheme := range validationSchemes { + if err := c.wantErr(scheme); err != nil { + for _, id := range []globalerror.ID{ + globalerror.SeriesInvalidLabel, + globalerror.SeriesInvalidLabelValue, + globalerror.SeriesLabelNameTooLong, + globalerror.SeriesLabelValueTooLong, + globalerror.MaxLabelNamesPerSeries, + 
globalerror.MaxLabelNamesPerInfoSeries, + globalerror.InvalidMetricName, + globalerror.MissingMetricName, + } { + if strings.Contains(err.Error(), string(id)) { + if discardedSamplesValues[id.LabelValue()] == nil { + discardedSamplesValues[id.LabelValue()] = map[string]int{} + } + team := string(c.metric["team"]) + discardedSamplesValues[id.LabelValue()][team]++ + } + } + } + } } randomReason := validation.DiscardedSamplesCounter(reg, "random reason") randomReason.WithLabelValues("different user", "custom label").Inc() - require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(` - # HELP cortex_discarded_samples_total The total number of samples that were discarded. - # TYPE cortex_discarded_samples_total counter - cortex_discarded_samples_total{group="custom label",reason="label_invalid",user="testUser"} 2 - cortex_discarded_samples_total{group="custom label",reason="label_name_too_long",user="testUser"} 1 - cortex_discarded_samples_total{group="custom label",reason="label_value_invalid",user="testUser"} 1 - cortex_discarded_samples_total{group="custom label",reason="label_value_too_long",user="testUser"} 1 - cortex_discarded_samples_total{group="custom label",reason="max_label_names_per_series",user="testUser"} 1 - cortex_discarded_samples_total{group="custom label",reason="max_label_names_per_info_series",user="testUser"} 1 - cortex_discarded_samples_total{group="custom label",reason="metric_name_invalid",user="testUser"} 2 - cortex_discarded_samples_total{group="custom label",reason="missing_metric_name",user="testUser"} 1 - cortex_discarded_samples_total{group="custom label",reason="random reason",user="different user"} 1 - `), "cortex_discarded_samples_total")) + wantDiscardedSamples := ` + # HELP cortex_discarded_samples_total The total number of samples that were discarded. 
+ # TYPE cortex_discarded_samples_total counter + cortex_discarded_samples_total{group="custom label",reason="random reason",user="different user"} 1 + ` + wantDiscardedAttrSamples := ` + # HELP cortex_discarded_attributed_samples_total The total number of samples that were discarded per attribution. + # TYPE cortex_discarded_attributed_samples_total counter + ` + + sumSamples := func(m map[string]int) (sum int) { + for _, v := range m { + sum += v + } + return + } - require.NoError(t, testutil.GatherAndCompare(careg, strings.NewReader(` - # HELP cortex_discarded_attributed_samples_total The total number of samples that were discarded per attribution. - # TYPE cortex_discarded_attributed_samples_total counter - cortex_discarded_attributed_samples_total{reason="label_invalid",team="a",tenant="testUser",tracker="cost-attribution"} 1 - cortex_discarded_attributed_samples_total{reason="label_invalid",team="b",tenant="testUser",tracker="cost-attribution"} 1 - cortex_discarded_attributed_samples_total{reason="label_name_too_long",team="biz",tenant="testUser",tracker="cost-attribution"} 1 - cortex_discarded_attributed_samples_total{reason="label_value_invalid",team="plof",tenant="testUser",tracker="cost-attribution"} 1 - cortex_discarded_attributed_samples_total{reason="label_value_too_long",team="biz",tenant="testUser",tracker="cost-attribution"} 1 - cortex_discarded_attributed_samples_total{reason="max_label_names_per_info_series",team="a",tenant="testUser",tracker="cost-attribution"} 1 - cortex_discarded_attributed_samples_total{reason="max_label_names_per_series",team="plof",tenant="testUser",tracker="cost-attribution"} 1 - cortex_discarded_attributed_samples_total{reason="metric_name_invalid",team="a",tenant="testUser",tracker="cost-attribution"} 2 - cortex_discarded_attributed_samples_total{reason="missing_metric_name",team="a",tenant="testUser",tracker="cost-attribution"} 1 -`), "cortex_discarded_attributed_samples_total")) + for reason, countByTeam := range 
discardedSamplesValues { + wantDiscardedSamples += fmt.Sprintf( + `cortex_discarded_samples_total{group="custom label",reason="%s",user="testUser"} %d`+"\n", + reason, + sumSamples(countByTeam), + ) + for team, count := range countByTeam { + wantDiscardedAttrSamples += fmt.Sprintf( + `cortex_discarded_attributed_samples_total{reason="%s",team="%s",tenant="testUser",tracker="cost-attribution"} %d`+"\n", + reason, + team, + count, + ) + } + } - s.deleteUserMetrics(userID) + require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(wantDiscardedSamples), "cortex_discarded_samples_total")) + require.NoError(t, testutil.GatherAndCompare(careg, strings.NewReader(wantDiscardedAttrSamples), "cortex_discarded_attributed_samples_total")) - require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(` - # HELP cortex_discarded_samples_total The total number of samples that were discarded. - # TYPE cortex_discarded_samples_total counter - cortex_discarded_samples_total{group="custom label",reason="random reason",user="different user"} 1 - `), "cortex_discarded_samples_total")) + s.deleteUserMetrics(userID) } func TestValidateExemplars(t *testing.T) { From b079740426c7ab7040b1a98112ea59ca97760c3d Mon Sep 17 00:00:00 2001 From: Julius Hinze Date: Wed, 23 Jul 2025 17:48:34 +0200 Subject: [PATCH 08/10] querymiddleware: utf8/legacy tests for MetricsQueryRequestValidationRoundTripper --- .../querymiddleware/request_validation_test.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/pkg/frontend/querymiddleware/request_validation_test.go b/pkg/frontend/querymiddleware/request_validation_test.go index 882fe65cea6..774289b3322 100644 --- a/pkg/frontend/querymiddleware/request_validation_test.go +++ b/pkg/frontend/querymiddleware/request_validation_test.go @@ -70,6 +70,21 @@ func TestMetricsQueryRequestValidationRoundTripper(t *testing.T) { url: instantQueryPathSuffix + "?query=up&start=123&end=456&step=60s", expectedErrType: "", }, + { + // accepts 
utf-8 label names + url: instantQueryPathSuffix + `?query=up{"test.label"="test"}`, + expectedErrType: "", + }, + { + // accepts utf-8 metric name + url: instantQueryPathSuffix + `?query={"test.label"}`, + expectedErrType: "", + }, + { + // invalid utf-8 string + url: instantQueryPathSuffix + "?query=up{\"test.label\"=\"\xff\"}", + expectedErrType: apierror.TypeBadData, + }, } { t.Run(strconv.Itoa(i), func(t *testing.T) { req, err := http.NewRequest(http.MethodGet, srv.URL+tc.url, nil) From 63c1a4edb9a60a8638c1d4e52cee0b6a06ac9ba3 Mon Sep 17 00:00:00 2001 From: Julius Hinze Date: Wed, 23 Jul 2025 17:58:55 +0200 Subject: [PATCH 09/10] chore: update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 534e7f045aa..fdd02f94af6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,8 @@ ### Grafana Mimir +* [CHANGE] Query-frontend: Add support for UTF-8 label and metric names in `/api/v1/cardinality/{label_names|label_values|active_series}` endpoints. #11848 +* [CHANGE] Querier: Add support for UTF-8 label and metric names in `label_join`, `label_replace` and `count_values` PromQL functions. #11848 * [CHANGE] Remove support for Redis as a cache backend. #12163 * [CHANGE] Memcached: Remove experimental `-.memcached.addresses-provider` flag to use alternate DNS service discovery backends. The more reliable backend introduced in 2.16.0 (#10895) is now the default. As a result of this change, DNS-based cache service discovery no longer supports search domains. #12175 * [FEATURE] Distributor: Add experimental `-distributor.otel-native-delta-ingestion` option to allow primitive delta metrics ingestion via the OTLP endpoint. 
#11631 From 65bc6f84d83f208280a8c5a9a5b5d98e7a330f0c Mon Sep 17 00:00:00 2001 From: Julius Hinze Date: Thu, 24 Jul 2025 13:14:34 +0200 Subject: [PATCH 10/10] util/validation: set relabel config validation scheme --- pkg/util/validation/limits.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go index a6c050c75d5..ba540a0d180 100644 --- a/pkg/util/validation/limits.go +++ b/pkg/util/validation/limits.go @@ -561,6 +561,8 @@ func (l *Limits) validate() error { if cfg == nil { return errors.New("invalid metric_relabel_configs") } + // TODO: when we make validation scheme configurable, set + // cfg.MetricNameValidationScheme to match that value. } if l.MaxEstimatedChunksPerQueryMultiplier < 1 && l.MaxEstimatedChunksPerQueryMultiplier != 0 { @@ -1056,7 +1058,12 @@ func (o *Overrides) CompactorBlockUploadMaxBlockSizeBytes(userID string) int64 { // MetricRelabelConfigs returns the metric relabel configs for a given user. func (o *Overrides) MetricRelabelConfigs(userID string) []*relabel.Config { - return o.getOverridesForUser(userID).MetricRelabelConfigs + relabelConfigs := o.getOverridesForUser(userID).MetricRelabelConfigs + validationScheme := o.ValidationScheme(userID) + for i := range relabelConfigs { + relabelConfigs[i].MetricNameValidationScheme = validationScheme + } + return relabelConfigs } func (o *Overrides) MetricRelabelingEnabled(userID string) bool {