From 90bc63a880bd40e689b12a6444483e3259f8ca2f Mon Sep 17 00:00:00 2001 From: SungJin1212 Date: Fri, 8 Aug 2025 15:21:34 +0900 Subject: [PATCH] Update prometheus to main (1e4144a496fb552bde5e3b325422b10707ac331e) Signed-off-by: SungJin1212 --- go.mod | 16 +- go.sum | 36 +- pkg/api/handlers.go | 1 + pkg/querier/error_translate_queryable_test.go | 1 + pkg/querier/stats_renderer_test.go | 1 + .../aws/aws-sdk-go-v2/aws/config.go | 4 + .../aws-sdk-go-v2/aws/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/aws/signer/v4/stream.go | 2 +- .../internal/configsources/CHANGELOG.md | 18 + .../configsources/go_module_metadata.go | 2 +- .../endpoints/awsrulesfn/partitions.go | 82 +++- .../endpoints/awsrulesfn/partitions.json | 22 + .../internal/endpoints/v2/CHANGELOG.md | 18 + .../endpoints/v2/go_module_metadata.go | 2 +- .../internal/accept-encoding/CHANGELOG.md | 8 + .../accept-encoding/go_module_metadata.go | 2 +- .../internal/presigned-url/CHANGELOG.md | 18 + .../presigned-url/go_module_metadata.go | 2 +- vendor/github.com/aws/smithy-go/CHANGELOG.md | 24 +- vendor/github.com/aws/smithy-go/Makefile | 20 +- vendor/github.com/aws/smithy-go/README.md | 4 +- .../aws/smithy-go/endpoints/endpoint.go | 2 +- .../aws/smithy-go/go_module_metadata.go | 2 +- vendor/github.com/aws/smithy-go/modman.toml | 1 - .../smithy-go/transport/http/interceptor.go | 321 ++++++++++++++ .../transport/http/interceptor_middleware.go | 325 ++++++++++++++ .../prometheus/otlptranslator/README.md | 122 +++++- .../prometheus/otlptranslator/doc.go | 24 ++ .../prometheus/otlptranslator/label_namer.go | 90 ++++ .../prometheus/otlptranslator/metric_namer.go | 145 +++++-- .../otlptranslator/normalize_label.go | 57 --- .../prometheus/otlptranslator/strategy.go | 86 ++++ .../prometheus/otlptranslator/unit_namer.go | 24 +- .../prometheus/prometheus/config/config.go | 100 ++--- .../prometheus/discovery/discovery.go | 2 +- .../discovery/metrics_k8s_client.go | 12 +- .../model/labels/labels_slicelabels.go | 4 +- .../model/labels/labels_stringlabels.go | 10 +- .../prometheus/model/labels/regexp.go | 8 +- .../model/textparse/openmetricsparse.go | 4 +- .../prometheus/model/textparse/promparse.go | 11 +- .../model/textparse/protobufparse.go | 2 +- .../prompb/io/prometheus/client/decoder.go | 20 +- .../prometheus/prometheus/promql/engine.go | 84 ++-- .../prometheus/prometheus/promql/functions.go | 395 +++++++++--------- .../prometheus/promql/parser/ast.go | 20 +- .../promql/parser/generated_parser.y | 26 +- .../promql/parser/generated_parser.y.go | 14 +- .../prometheus/promql/parser/parse.go | 39 +- .../prometheus/promql/parser/prettier.go | 2 +- .../prometheus/promql/promqltest/test.go | 8 +- .../prometheus/prometheus/promql/value.go | 4 +- .../prometheus/prometheus/rules/manager.go | 14 +- .../prometheus/prometheus/scrape/scrape.go | 8 +- .../prometheus/prometheus/storage/buffer.go | 16 +- .../prometheus/storage/interface.go | 30 +- .../prometheus/storage/remote/client.go | 2 +- .../prometheus/storage/remote/codec.go | 10 +- .../storage/remote/metadata_watcher.go | 2 +- .../prometheusremotewrite/helper.go | 153 ++++--- .../prometheusremotewrite/histograms.go | 22 +- .../prometheusremotewrite/metrics_to_prw.go | 32 +- .../number_data_points.go | 40 +- .../prometheus/storage/remote/read.go | 6 +- .../prometheus/storage/remote/storage.go | 2 +- .../prometheus/storage/remote/write.go | 6 +- .../storage/remote/write_handler.go | 26 +- .../prometheus/prometheus/storage/series.go | 12 +- .../prometheus/tsdb/chunkenc/chunk.go | 8 +- 
.../tsdb/chunkenc/float_histogram.go | 146 +++---- .../prometheus/tsdb/chunkenc/histogram.go | 144 +++---- .../tsdb/chunkenc/histogram_meta.go | 163 ++------ .../prometheus/tsdb/chunkenc/xor.go | 10 +- .../prometheus/tsdb/chunks/head_chunks.go | 8 +- .../prometheus/prometheus/tsdb/compact.go | 4 +- .../prometheus/prometheus/tsdb/db.go | 12 +- .../prometheus/prometheus/tsdb/exemplar.go | 4 +- .../prometheus/tsdb/fileutil/dir.go | 4 + .../tsdb/fileutil/direct_io_unsupported.go | 2 +- .../prometheus/prometheus/tsdb/head.go | 2 +- .../prometheus/prometheus/tsdb/head_append.go | 2 +- .../prometheus/prometheus/tsdb/head_other.go | 2 +- .../prometheus/prometheus/tsdb/head_read.go | 2 +- .../prometheus/prometheus/tsdb/index/index.go | 162 +------ .../prometheus/tsdb/index/postings.go | 57 +-- .../prometheus/tsdb/ooo_head_read.go | 24 +- .../prometheus/prometheus/tsdb/querier.go | 4 +- .../prometheus/tsdb/record/record.go | 36 +- .../util/annotations/annotations.go | 2 +- .../prometheus/util/compression/buffers.go | 8 +- .../prometheus/util/stats/query_stats.go | 2 +- .../prometheus/util/testutil/context.go | 4 +- .../prometheus/util/testutil/directory.go | 2 +- .../prometheus/prometheus/web/api/v1/api.go | 39 +- .../prometheus/web/api/v1/json_codec.go | 6 +- vendor/modules.txt | 16 +- 96 files changed, 2211 insertions(+), 1294 deletions(-) create mode 100644 vendor/github.com/aws/smithy-go/transport/http/interceptor.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go create mode 100644 vendor/github.com/prometheus/otlptranslator/doc.go create mode 100644 vendor/github.com/prometheus/otlptranslator/label_namer.go delete mode 100644 vendor/github.com/prometheus/otlptranslator/normalize_label.go create mode 100644 vendor/github.com/prometheus/otlptranslator/strategy.go diff --git a/go.mod b/go.mod index b4adb6e2987..0227834ce6e 100644 --- a/go.mod +++ b/go.mod @@ -45,7 +45,7 @@ require ( github.com/prometheus/client_model v0.6.2 github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3 // Prometheus maps version 2.x.y to tags v0.x.y. 
- github.com/prometheus/prometheus v0.305.1-0.20250721065454-b09cf6be8d56 + github.com/prometheus/prometheus v0.305.1-0.20250808023455-1e4144a496fb github.com/segmentio/fasthash v1.0.3 github.com/sony/gobreaker v1.0.0 github.com/spf13/afero v1.11.0 @@ -113,19 +113,19 @@ require ( github.com/alecthomas/kingpin/v2 v2.4.0 // indirect github.com/andybalholm/brotli v1.1.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect + github.com/aws/aws-sdk-go-v2 v1.37.0 // indirect github.com/aws/aws-sdk-go-v2/config v1.29.15 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.68 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.33.20 // indirect - github.com/aws/smithy-go v1.22.3 // indirect + github.com/aws/smithy-go v1.22.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/caio/go-tdigest v3.1.0+incompatible // indirect @@ -227,7 +227,7 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus-community/prom-label-proxy v0.11.1 // indirect github.com/prometheus/exporter-toolkit v0.14.0 // indirect - github.com/prometheus/otlptranslator v0.0.0-20250620074007-94f535e0c588 // indirect + github.com/prometheus/otlptranslator v0.0.0-20250731173911-a9673827589a // indirect github.com/prometheus/sigv4 v0.2.0 // indirect github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect github.com/rantav/go-grpc-channelz v0.0.4 // indirect diff --git a/go.sum b/go.sum index b18f8637c01..552c32323c5 100644 --- a/go.sum +++ b/go.sum @@ -151,32 +151,36 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE= github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= -github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM= -github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= +github.com/aws/aws-sdk-go-v2 v1.37.0 h1:YtCOESR/pN4j5oA7cVHSfOwIcuh/KwHC4DOSXFbv5F0= +github.com/aws/aws-sdk-go-v2 v1.37.0/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg= github.com/aws/aws-sdk-go-v2/config v1.29.15 h1:I5XjesVMpDZXZEZonVfjI12VNMrYa38LtLnw4NtY5Ss= github.com/aws/aws-sdk-go-v2/config v1.29.15/go.mod h1:tNIp4JIPonlsgaO5hxO372a6gjhN63aSWl2GVl5QoBQ= github.com/aws/aws-sdk-go-v2/credentials v1.17.68 
h1:cFb9yjI02/sWHBSYXAtkamjzCuRymvmeFmt0TC0MbYY= github.com/aws/aws-sdk-go-v2/credentials v1.17.68/go.mod h1:H6E+jBzyqUu8u0vGaU6POkK3P0NylYEeRZ6ynBpMqIk= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0 h1:H2iZoqW/v2Jnrh1FnU725Bq6KJ0k2uP63yH+DcY+HUI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0/go.mod h1:L0FqLbwMXHvNC/7crWV1iIxUlOKYZUE8KuTIA+TozAI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0 h1:EDped/rNzAhFPhVY0sDGbtD16OKqksfA8OjF/kLEgw8= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0/go.mod h1:uUI335jvzpZRPpjYx6ODc/wg1qH+NnoSTK/FwVeK0C0= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.237.0 h1:XHE2G+yaDQql32FZt19QmQt4WuisqQJIkMUSCxeCUl8= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.237.0/go.mod h1:t11/j/nH9i6bbsPH9xc04BJOsV2nVPUqrB67/TLDsyM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 h1:6+lZi2JeGKtCraAj1rpoZfKqnQ9SptseRZioejfUOLM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 h1:eRhU3Sh8dGbaniI6B+I48XJMrTPRkK4DKo+vqIxziOU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0/go.mod h1:paNLV18DZ6FnWE/bd06RIKPDIFpjuvCkGKWTG/GDBeM= +github.com/aws/aws-sdk-go-v2/service/lightsail v1.44.0 h1:QiiCqpKy0prxq+92uWfESzcb7/8Y9JAamcMOzVYLEoM= +github.com/aws/aws-sdk-go-v2/service/lightsail v1.44.0/go.mod h1:ESppxYqXQCpCY+KWl3BdkQjmsQX6zxKP39SnDtRDoU0= github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8= github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs= github.com/aws/aws-sdk-go-v2/service/sts v1.33.20 h1:oIaQ1e17CSKaWmUTu62MtraRWVIosn/iONMuZt0gbqc= github.com/aws/aws-sdk-go-v2/service/sts v1.33.20/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4= -github.com/aws/smithy-go v1.22.3 h1:Z//5NuZCSW6R4PhQ93hShNbyBbn8BWCmCVCt+Q8Io5k= 
-github.com/aws/smithy-go v1.22.3/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= +github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw= +github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/baidubce/bce-sdk-go v0.9.230 h1:HzELBKiD7QAgYqZ1qHZexoI2A3Lo/6zYGQFvcUbS5cA= github.com/baidubce/bce-sdk-go v0.9.230/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= @@ -852,8 +856,8 @@ github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3 h1:R/zO7ombSH github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg= github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA= -github.com/prometheus/otlptranslator v0.0.0-20250620074007-94f535e0c588 h1:QlySqDdSESgWDePeAYskbbcKKdowI26m9aU9zloHyYE= -github.com/prometheus/otlptranslator v0.0.0-20250620074007-94f535e0c588/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI= +github.com/prometheus/otlptranslator v0.0.0-20250731173911-a9673827589a h1:r2csuCATbgDz2Nk2PkKo7b6x7ErrF3NMmxwH0fifqN8= +github.com/prometheus/otlptranslator v0.0.0-20250731173911-a9673827589a/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= @@ -861,8 +865,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= -github.com/prometheus/prometheus v0.305.1-0.20250721065454-b09cf6be8d56 h1:F7rkXwWiujBbpql4Syxr1bbbaQf/ePB24BInELXpAQc= -github.com/prometheus/prometheus v0.305.1-0.20250721065454-b09cf6be8d56/go.mod h1:7hMSGyZHt0dcmZ5r4kFPJ/vxPQU99N5/BGwSPDxeZrQ= +github.com/prometheus/prometheus v0.305.1-0.20250808023455-1e4144a496fb h1:azXJoaVT+S7PRdbdUwtyivhaGq++ZF5YTkk1XlTaZkw= +github.com/prometheus/prometheus v0.305.1-0.20250808023455-1e4144a496fb/go.mod h1:nFT/lsJGZPCe1mC6uLIoDuK2bP9JO9DBHIDPQsuZucQ= github.com/prometheus/sigv4 v0.2.0 h1:qDFKnHYFswJxdzGeRP63c4HlH3Vbn1Yf/Ao2zabtVXk= github.com/prometheus/sigv4 v0.2.0/go.mod h1:D04rqmAaPPEUkjRQxGqjoxdyJuyCh6E0M18fZr0zBiE= github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg= diff --git a/pkg/api/handlers.go b/pkg/api/handlers.go index 0fa51bf94a4..cfa7419adb5 100644 --- a/pkg/api/handlers.go +++ b/pkg/api/handlers.go @@ -242,6 +242,7 @@ func NewQuerierHandler( false, false, querierCfg.LookbackDelta, + false, ) // Let's clear all codecs to create the instrumented ones api.ClearCodecs() diff --git a/pkg/querier/error_translate_queryable_test.go b/pkg/querier/error_translate_queryable_test.go index 03a22d52375..dbbdbfc9b59 100644 --- a/pkg/querier/error_translate_queryable_test.go +++ b/pkg/querier/error_translate_queryable_test.go @@ -178,6 +178,7 @@ func createPrometheusAPI(q storage.SampleAndChunkQueryable, engine 
promql.QueryE false, false, 5*time.Minute, + false, ) promRouter := route.New().WithPrefix("/api/v1") diff --git a/pkg/querier/stats_renderer_test.go b/pkg/querier/stats_renderer_test.go index 9f033486127..0b8d591c2a4 100644 --- a/pkg/querier/stats_renderer_test.go +++ b/pkg/querier/stats_renderer_test.go @@ -92,6 +92,7 @@ func Test_StatsRenderer(t *testing.T) { false, false, 5*time.Minute, + false, ) promRouter := route.New().WithPrefix("/api/v1") diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go index a015cc5b20c..7098087408f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go @@ -6,6 +6,7 @@ import ( smithybearer "github.com/aws/smithy-go/auth/bearer" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" ) // HTTPClient provides the interface to provide custom HTTPClients. Generally @@ -192,6 +193,9 @@ type Config struct { // This variable is sourced from environment variable AWS_RESPONSE_CHECKSUM_VALIDATION or // the shared config profile attribute "response_checksum_validation". ResponseChecksumValidation ResponseChecksumValidation + + // Registry of HTTP interceptors. + Interceptors smithyhttp.InterceptorRegistry } // NewConfig returns a new Config pointer that can be chained with builder diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index 8e930fc6f87..af3a23a5253 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.36.3" +const goModuleVersion = "1.37.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go index 66aa2bd6ab0..32875e07798 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go @@ -59,7 +59,7 @@ func (s *StreamSigner) GetSignature(ctx context.Context, headers, payload []byte prevSignature := s.prevSignature - st := v4Internal.NewSigningTime(signingTime) + st := v4Internal.NewSigningTime(signingTime.UTC()) sigKey := s.signingKeyDeriver.DeriveKey(s.credentials, s.service, s.region, st) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index eae3e16af7d..b604152d5fe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,21 @@ +# v1.4.0 (2025-07-28) + +* **Feature**: Add support for HTTP interceptors. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.37 (2025-07-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.36 (2025-06-17) + +* **Dependency Update**: Update to smithy-go v1.22.4. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.35 (2025-06-10) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.3.34 (2025-02-27) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index eddabe6344c..c2c39f91937 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.34" +const goModuleVersion = "1.4.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go index 5f0779997de..619c1f5d8f5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go @@ -11,7 +11,7 @@ func GetPartition(region string) *PartitionConfig { var partitions = []Partition{ { ID: "aws", - RegionRegex: "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", + RegionRegex: "^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ Name: "aws", DnsSuffix: "amazonaws.com", @@ -35,6 +35,13 @@ var partitions = []Partition{ SupportsFIPS: nil, SupportsDualStack: nil, }, + "ap-east-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "ap-northeast-1": { Name: nil, DnsSuffix: nil, @@ -98,6 +105,20 @@ var partitions = []Partition{ SupportsFIPS: nil, SupportsDualStack: nil, }, + "ap-southeast-5": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-southeast-7": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "aws-global": { Name: nil, DnsSuffix: nil, @@ -196,6 +217,13 @@ var partitions = []Partition{ SupportsFIPS: nil, SupportsDualStack: nil, }, + "mx-central-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "sa-east-1": { Name: nil, DnsSuffix: nil, @@ -378,6 +406,13 @@ var partitions = []Partition{ ImplicitGlobalRegion: "eu-isoe-west-1", }, Regions: map[string]RegionOverrides{ + "aws-iso-e-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "eu-isoe-west-1": { Name: nil, DnsSuffix: nil, @@ -398,6 +433,49 @@ var partitions = []Partition{ SupportsDualStack: false, ImplicitGlobalRegion: "us-isof-south-1", }, - Regions: map[string]RegionOverrides{}, + Regions: map[string]RegionOverrides{ + "aws-iso-f-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-isof-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-isof-south-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, + }, + { + ID: "aws-eusc", + RegionRegex: "^eusc\\-(de)\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws-eusc", + DnsSuffix: "amazonaws.eu", + DualStackDnsSuffix: "amazonaws.eu", + 
SupportsFIPS: true, + SupportsDualStack: false, + ImplicitGlobalRegion: "eusc-de-east-1", + }, + Regions: map[string]RegionOverrides{ + "eusc-de-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, }, } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json index e19224f1b86..456b07fca67 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json @@ -17,6 +17,9 @@ "ap-east-1" : { "description" : "Asia Pacific (Hong Kong)" }, + "ap-east-2" : { + "description" : "Asia Pacific (Taipei)" + }, "ap-northeast-1" : { "description" : "Asia Pacific (Tokyo)" }, @@ -208,6 +211,9 @@ }, "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", "regions" : { + "aws-iso-e-global" : { + "description" : "AWS ISOE (Europe) global region" + }, "eu-isoe-west-1" : { "description" : "EU ISOE West" } @@ -234,6 +240,22 @@ "description" : "US ISOF SOUTH" } } + }, { + "id" : "aws-eusc", + "outputs" : { + "dnsSuffix" : "amazonaws.eu", + "dualStackDnsSuffix" : "amazonaws.eu", + "implicitGlobalRegion" : "eusc-de-east-1", + "name" : "aws-eusc", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^eusc\\-(de)\\-\\w+\\-\\d+$", + "regions" : { + "eusc-de-east-1" : { + "description" : "EU (Germany)" + } + } } ], "version" : "1.1" } \ No newline at end of file diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index 83e5bd28a72..4760d92ef7e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,21 @@ +# v2.7.0 (2025-07-28) + +* **Feature**: Add support for HTTP interceptors. +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.37 (2025-07-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.36 (2025-06-17) + +* **Dependency Update**: Update to smithy-go v1.22.4. +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.35 (2025-06-10) + +* **Dependency Update**: Updated to the latest SDK module versions + # v2.6.34 (2025-02-27) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index 735dba7ac79..056246dc4c0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.6.34" +const goModuleVersion = "2.7.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md index c81265a25df..32c9d515746 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md @@ -1,3 +1,11 @@ +# v1.13.0 (2025-07-28) + +* **Feature**: Add support for HTTP interceptors. 
+ +# v1.12.4 (2025-06-17) + +* **Dependency Update**: Update to smithy-go v1.22.4. + # v1.12.3 (2025-02-18) * **Bug Fix**: Bump go version to 1.22 diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go index d83e533effd..f4b9f0b9488 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go @@ -3,4 +3,4 @@ package acceptencoding // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.12.3" +const goModuleVersion = "1.13.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index 2b5ceb4b512..869246098ed 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,21 @@ +# v1.13.0 (2025-07-28) + +* **Feature**: Add support for HTTP interceptors. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.18 (2025-07-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.17 (2025-06-17) + +* **Dependency Update**: Update to smithy-go v1.22.4. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.16 (2025-06-10) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.12.15 (2025-02-27) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index a165a100f8d..beae329a8f3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.12.15" +const goModuleVersion = "1.13.0" diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md index 4df632dce80..1d60def6d1b 100644 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -1,13 +1,33 @@ -# Release (2025-02-17) +# Release (2025-07-24) ## General Highlights * **Dependency Update**: Updated to the latest SDK module versions ## Module Highlights -* `github.com/aws/smithy-go`: v1.22.3 +* `github.com/aws/smithy-go`: v1.22.5 + * **Bug Fix**: Fix HTTP metrics data race. + * **Bug Fix**: Replace usages of deprecated ioutil package. + +# Release (2025-06-16) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.22.4 + * **Bug Fix**: Fix CBOR serd empty check for string and enum fields * **Bug Fix**: Fix HTTP metrics data race. * **Bug Fix**: Replace usages of deprecated ioutil package. +# Release (2025-02-17) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.22.3 + * **Dependency Update**: Bump minimum Go version to 1.22 per our language support policy. 
+ # Release (2025-01-21) ## General Highlights diff --git a/vendor/github.com/aws/smithy-go/Makefile b/vendor/github.com/aws/smithy-go/Makefile index a3c2cf173de..34b17ab2fe0 100644 --- a/vendor/github.com/aws/smithy-go/Makefile +++ b/vendor/github.com/aws/smithy-go/Makefile @@ -30,6 +30,24 @@ smithy-build: smithy-clean: cd codegen && ./gradlew clean +GRADLE_RETRIES := 3 +GRADLE_SLEEP := 2 + +# We're making a call to ./gradlew to trigger downloading Gradle and +# starting the daemon. Any call works, so using `./gradlew help` +ensure-gradle-up: + @cd codegen && for i in $(shell seq 1 $(GRADLE_RETRIES)); do \ + echo "Checking if Gradle daemon is up, attempt $$i..."; \ + if ./gradlew help; then \ + echo "Gradle daemon is up!"; \ + exit 0; \ + fi; \ + echo "Failed to start Gradle, retrying in $(GRADLE_SLEEP) seconds..."; \ + sleep $(GRADLE_SLEEP); \ + done; \ + echo "Failed to start Gradle after $(GRADLE_RETRIES) attempts."; \ + exit 1 + ################## # Linting/Verify # ################## @@ -51,12 +69,10 @@ cover: .PHONY: unit unit-race unit-test unit-race-test unit: verify - go vet ${BUILD_TAGS} --all ./... && \ go test ${BUILD_TAGS} ${RUN_NONE} ./... && \ go test -timeout=1m ${UNIT_TEST_TAGS} ./... unit-race: verify - go vet ${BUILD_TAGS} --all ./... && \ go test ${BUILD_TAGS} ${RUN_NONE} ./... && \ go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./... diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md index 08df74589a8..c9ba5ea5e4b 100644 --- a/vendor/github.com/aws/smithy-go/README.md +++ b/vendor/github.com/aws/smithy-go/README.md @@ -4,7 +4,7 @@ [Smithy](https://smithy.io/) code generators for Go and the accompanying smithy-go runtime. -The smithy-go runtime requires a minimum version of Go 1.20. +The smithy-go runtime requires a minimum version of Go 1.22. **WARNING: All interfaces are subject to change.** @@ -77,7 +77,7 @@ example created from `smithy init`: "service": "example.weather#Weather", "module": "github.com/example/weather", "generateGoMod": true, - "goDirective": "1.20" + "goDirective": "1.22" } } } diff --git a/vendor/github.com/aws/smithy-go/endpoints/endpoint.go b/vendor/github.com/aws/smithy-go/endpoints/endpoint.go index a9352839748..f778272be30 100644 --- a/vendor/github.com/aws/smithy-go/endpoints/endpoint.go +++ b/vendor/github.com/aws/smithy-go/endpoints/endpoint.go @@ -9,7 +9,7 @@ import ( // Endpoint is the endpoint object returned by Endpoint resolution V2 type Endpoint struct { - // The complete URL minimally specfiying the scheme and host. + // The complete URL minimally specifying the scheme and host. // May optionally specify the port and base path component. 
URI url.URL diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go index d12d95891d2..cbbaabee9ef 100644 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -3,4 +3,4 @@ package smithy // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.22.3" +const goModuleVersion = "1.22.5" diff --git a/vendor/github.com/aws/smithy-go/modman.toml b/vendor/github.com/aws/smithy-go/modman.toml index 9d94b7cbd0a..aac582fa2ce 100644 --- a/vendor/github.com/aws/smithy-go/modman.toml +++ b/vendor/github.com/aws/smithy-go/modman.toml @@ -1,5 +1,4 @@ [dependencies] - "github.com/jmespath/go-jmespath" = "v0.4.0" [modules] diff --git a/vendor/github.com/aws/smithy-go/transport/http/interceptor.go b/vendor/github.com/aws/smithy-go/transport/http/interceptor.go new file mode 100644 index 00000000000..e21f2632a6e --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/interceptor.go @@ -0,0 +1,321 @@ +package http + +import ( + "context" +) + +func icopy[T any](v []T) []T { + s := make([]T, len(v)) + copy(s, v) + return s +} + +// InterceptorContext is all the information available in different +// interceptors. +// +// Not all information is available in each interceptor, see each interface +// definition for more details. +type InterceptorContext struct { + Input any + Request *Request + + Output any + Response *Response +} + +// InterceptorRegistry holds a list of operation interceptors. +// +// Interceptors allow callers to insert custom behavior at well-defined points +// within a client's operation lifecycle. +// +// # Interceptor context +// +// All interceptors are invoked with a context object that contains input and +// output containers for the operation. The individual fields that are +// available will depend on what the interceptor is and, in certain +// interceptors, how far the operation was able to progress. See the +// documentation for each interface definition for more information about field +// availability. +// +// Implementations MUST NOT directly mutate the values of the fields in the +// interceptor context. They are free to mutate the existing values _pointed +// to_ by those fields, however. +// +// # Returning errors +// +// All interceptors can return errors. If an interceptor returns an error +// _before_ the client's retry loop, the operation will fail immediately. If +// one returns an error _within_ the retry loop, the error WILL be considered +// according to the client's retry policy. +// +// # Adding interceptors +// +// Idiomatically you will simply use one of the Add() receiver methods to +// register interceptors as desired. However, the list for each interface is +// exported on the registry struct and the caller is free to manipulate it +// directly, for example, to register a number of interceptors all at once, or +// to remove one that was previously registered. +// +// The base SDK client WILL NOT add any interceptors. SDK operations and +// customizations are implemented in terms of middleware. +// +// Modifications to the registry will not persist across operation calls when +// using per-operation functional options. This means you can register +// interceptors on a per-operation basis without affecting other operations. 
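A minimal usage sketch of the interceptor hooks described above, registering through the `Interceptors` field this patch adds to `aws.Config`. The `traceHeader` type and header name are hypothetical, and the sketch assumes smithy-go's `Request` embeds `*net/http.Request` as in current releases; it is an illustration, not part of the vendored files:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

// traceHeader stamps a header on every outgoing request. It hooks
// BeforeSigning so the header is covered by the request signature;
// per the AfterSigning docs below, mutating the request at or past
// that hook may invalidate the signature.
type traceHeader struct{}

func (traceHeader) BeforeSigning(ctx context.Context, in *smithyhttp.InterceptorContext) error {
	// Only Input and Request are populated at this hook. Mutate the
	// request the context points to; never replace the field itself.
	in.Request.Header.Set("X-Trace-Marker", "enabled") // hypothetical header
	return nil
}

func main() {
	var cfg aws.Config
	// Interceptors is the registry field added to aws.Config in this patch.
	cfg.Interceptors.AddBeforeSigning(traceHeader{})
	fmt.Println(len(cfg.Interceptors.BeforeSigning)) // 1
}
```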
+type InterceptorRegistry struct { + BeforeExecution []BeforeExecutionInterceptor + BeforeSerialization []BeforeSerializationInterceptor + AfterSerialization []AfterSerializationInterceptor + BeforeRetryLoop []BeforeRetryLoopInterceptor + BeforeAttempt []BeforeAttemptInterceptor + BeforeSigning []BeforeSigningInterceptor + AfterSigning []AfterSigningInterceptor + BeforeTransmit []BeforeTransmitInterceptor + AfterTransmit []AfterTransmitInterceptor + BeforeDeserialization []BeforeDeserializationInterceptor + AfterDeserialization []AfterDeserializationInterceptor + AfterAttempt []AfterAttemptInterceptor + AfterExecution []AfterExecutionInterceptor +} + +// Copy returns a deep copy of the registry. This is used by SDK clients on +// each operation call in order to prevent per-op config mutation from +// persisting. +func (i *InterceptorRegistry) Copy() InterceptorRegistry { + return InterceptorRegistry{ + BeforeExecution: icopy(i.BeforeExecution), + BeforeSerialization: icopy(i.BeforeSerialization), + AfterSerialization: icopy(i.AfterSerialization), + BeforeRetryLoop: icopy(i.BeforeRetryLoop), + BeforeAttempt: icopy(i.BeforeAttempt), + BeforeSigning: icopy(i.BeforeSigning), + AfterSigning: icopy(i.AfterSigning), + BeforeTransmit: icopy(i.BeforeTransmit), + AfterTransmit: icopy(i.AfterTransmit), + BeforeDeserialization: icopy(i.BeforeDeserialization), + AfterDeserialization: icopy(i.AfterDeserialization), + AfterAttempt: icopy(i.AfterAttempt), + AfterExecution: icopy(i.AfterExecution), + } +} + +// AddBeforeExecution registers the provided BeforeExecutionInterceptor. +func (i *InterceptorRegistry) AddBeforeExecution(v BeforeExecutionInterceptor) { + i.BeforeExecution = append(i.BeforeExecution, v) +} + +// AddBeforeSerialization registers the provided BeforeSerializationInterceptor. +func (i *InterceptorRegistry) AddBeforeSerialization(v BeforeSerializationInterceptor) { + i.BeforeSerialization = append(i.BeforeSerialization, v) +} + +// AddAfterSerialization registers the provided AfterSerializationInterceptor. +func (i *InterceptorRegistry) AddAfterSerialization(v AfterSerializationInterceptor) { + i.AfterSerialization = append(i.AfterSerialization, v) +} + +// AddBeforeRetryLoop registers the provided BeforeRetryLoopInterceptor. +func (i *InterceptorRegistry) AddBeforeRetryLoop(v BeforeRetryLoopInterceptor) { + i.BeforeRetryLoop = append(i.BeforeRetryLoop, v) +} + +// AddBeforeAttempt registers the provided BeforeAttemptInterceptor. +func (i *InterceptorRegistry) AddBeforeAttempt(v BeforeAttemptInterceptor) { + i.BeforeAttempt = append(i.BeforeAttempt, v) +} + +// AddBeforeSigning registers the provided BeforeSigningInterceptor. +func (i *InterceptorRegistry) AddBeforeSigning(v BeforeSigningInterceptor) { + i.BeforeSigning = append(i.BeforeSigning, v) +} + +// AddAfterSigning registers the provided AfterSigningInterceptor. +func (i *InterceptorRegistry) AddAfterSigning(v AfterSigningInterceptor) { + i.AfterSigning = append(i.AfterSigning, v) +} + +// AddBeforeTransmit registers the provided BeforeTransmitInterceptor. +func (i *InterceptorRegistry) AddBeforeTransmit(v BeforeTransmitInterceptor) { + i.BeforeTransmit = append(i.BeforeTransmit, v) +} + +// AddAfterTransmit registers the provided AfterTransmitInterceptor. +func (i *InterceptorRegistry) AddAfterTransmit(v AfterTransmitInterceptor) { + i.AfterTransmit = append(i.AfterTransmit, v) +} + +// AddBeforeDeserialization registers the provided BeforeDeserializationInterceptor. 
+func (i *InterceptorRegistry) AddBeforeDeserialization(v BeforeDeserializationInterceptor) { + i.BeforeDeserialization = append(i.BeforeDeserialization, v) +} + +// AddAfterDeserialization registers the provided AfterDeserializationInterceptor. +func (i *InterceptorRegistry) AddAfterDeserialization(v AfterDeserializationInterceptor) { + i.AfterDeserialization = append(i.AfterDeserialization, v) +} + +// AddAfterAttempt registers the provided AfterAttemptInterceptor. +func (i *InterceptorRegistry) AddAfterAttempt(v AfterAttemptInterceptor) { + i.AfterAttempt = append(i.AfterAttempt, v) +} + +// AddAfterExecution registers the provided AfterExecutionInterceptor. +func (i *InterceptorRegistry) AddAfterExecution(v AfterExecutionInterceptor) { + i.AfterExecution = append(i.AfterExecution, v) +} + +// BeforeExecutionInterceptor runs before anything else in the operation +// lifecycle. +// +// Available InterceptorContext fields: +// - Input +type BeforeExecutionInterceptor interface { + BeforeExecution(ctx context.Context, in *InterceptorContext) error +} + +// BeforeSerializationInterceptor runs before the operation input is serialized +// into its transport request. +// +// Serialization occurs before the operation's retry loop. +// +// Available InterceptorContext fields: +// - Input +type BeforeSerializationInterceptor interface { + BeforeSerialization(ctx context.Context, in *InterceptorContext) error +} + +// AfterSerializationInterceptor runs after the operation input is serialized +// into its transport request. +// +// Available InterceptorContext fields: +// - Input +// - Request +type AfterSerializationInterceptor interface { + AfterSerialization(ctx context.Context, in *InterceptorContext) error +} + +// BeforeRetryLoopInterceptor runs right before the operation enters the retry loop. +// +// Available InterceptorContext fields: +// - Input +// - Request +type BeforeRetryLoopInterceptor interface { + BeforeRetryLoop(ctx context.Context, in *InterceptorContext) error +} + +// BeforeAttemptInterceptor runs right before every attempt in the retry loop. +// +// If this interceptor returns an error, AfterAttempt interceptors WILL NOT be +// invoked. +// +// Available InterceptorContext fields: +// - Input +// - Request +type BeforeAttemptInterceptor interface { + BeforeAttempt(ctx context.Context, in *InterceptorContext) error +} + +// BeforeSigningInterceptor runs right before the request is signed. +// +// Signing occurs within the operation's retry loop. +// +// Available InterceptorContext fields: +// - Input +// - Request +type BeforeSigningInterceptor interface { + BeforeSigning(ctx context.Context, in *InterceptorContext) error +} + +// AfterSigningInterceptor runs right after the request is signed. +// +// It is unsafe to modify the outgoing HTTP request at or past this hook, since +// doing so may invalidate the signature of the request. +// +// Available InterceptorContext fields: +// - Input +// - Request +type AfterSigningInterceptor interface { + AfterSigning(ctx context.Context, in *InterceptorContext) error +} + +// BeforeTransmitInterceptor runs right before the HTTP request is sent. +// +// HTTP transmit occurs within the operation's retry loop. +// +// Available InterceptorContext fields: +// - Input +// - Request +type BeforeTransmitInterceptor interface { + BeforeTransmit(ctx context.Context, in *InterceptorContext) error +} + +// AfterTransmitInterceptor runs right after the HTTP response is received. 
+// +// It will always be invoked when a response is received, regardless of its +// status code. Conversely, it WILL NOT be invoked if the HTTP round-trip was +// not successful, e.g. because of a DNS resolution error +// +// Available InterceptorContext fields: +// - Input +// - Request +// - Response +type AfterTransmitInterceptor interface { + AfterTransmit(ctx context.Context, in *InterceptorContext) error +} + +// BeforeDeserializationInterceptor runs right before the incoming HTTP response +// is deserialized. +// +// This interceptor IS NOT invoked if the HTTP round-trip was not successful. +// +// Deserialization occurs within the operation's retry loop. +// +// Available InterceptorContext fields: +// - Input +// - Request +// - Response +type BeforeDeserializationInterceptor interface { + BeforeDeserialization(ctx context.Context, in *InterceptorContext) error +} + +// AfterDeserializationInterceptor runs right after the incoming HTTP response +// is deserialized. This hook is invoked regardless of whether the deserialized +// result was an error. +// +// This interceptor IS NOT invoked if the HTTP round-trip was not successful. +// +// Available InterceptorContext fields: +// - Input +// - Output (IF the operation had a success-level response) +// - Request +// - Response +type AfterDeserializationInterceptor interface { + AfterDeserialization(ctx context.Context, in *InterceptorContext) error +} + +// AfterAttemptInterceptor runs right after the incoming HTTP response +// is deserialized. This hook is invoked regardless of whether the deserialized +// result was an error, or if another interceptor within the retry loop +// returned an error. +// +// Available InterceptorContext fields: +// - Input +// - Output (IF the operation had a success-level response) +// - Request (IF the operation did not return an error during serialization) +// - Response (IF the operation was able to transmit the HTTP request) +type AfterAttemptInterceptor interface { + AfterAttempt(ctx context.Context, in *InterceptorContext) error +} + +// AfterExecutionInterceptor runs after everything else. It runs regardless of +// how far the operation progressed in its lifecycle, and regardless of whether +// the operation succeeded or failed. +// +// Available InterceptorContext fields: +// - Input +// - Output (IF the operation had a success-level response) +// - Request (IF the operation did not return an error during serialization) +// - Response (IF the operation was able to transmit the HTTP request) +type AfterExecutionInterceptor interface { + AfterExecution(ctx context.Context, in *InterceptorContext) error +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go b/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go new file mode 100644 index 00000000000..2cc4b57f894 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go @@ -0,0 +1,325 @@ +package http + +import ( + "context" + "errors" + + "github.com/aws/smithy-go/middleware" +) + +type ictxKey struct{} + +func withIctx(ctx context.Context) context.Context { + return middleware.WithStackValue(ctx, ictxKey{}, &InterceptorContext{}) +} + +func getIctx(ctx context.Context) *InterceptorContext { + return middleware.GetStackValue(ctx, ictxKey{}).(*InterceptorContext) +} + +// InterceptExecution runs Before/AfterExecutionInterceptors. 
+type InterceptExecution struct { + BeforeExecution []BeforeExecutionInterceptor + AfterExecution []AfterExecutionInterceptor +} + +// ID identifies the middleware. +func (m *InterceptExecution) ID() string { + return "InterceptExecution" +} + +// HandleInitialize runs the interceptors. +func (m *InterceptExecution) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + out middleware.InitializeOutput, md middleware.Metadata, err error, +) { + ctx = withIctx(ctx) + getIctx(ctx).Input = in.Parameters + + for _, i := range m.BeforeExecution { + if err := i.BeforeExecution(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + out, md, err = next.HandleInitialize(ctx, in) + + for _, i := range m.AfterExecution { + if err := i.AfterExecution(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} + +// InterceptBeforeSerialization runs BeforeSerializationInterceptors. +type InterceptBeforeSerialization struct { + Interceptors []BeforeSerializationInterceptor +} + +// ID identifies the middleware. +func (m *InterceptBeforeSerialization) ID() string { + return "InterceptBeforeSerialization" +} + +// HandleSerialize runs the interceptors. +func (m *InterceptBeforeSerialization) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.Interceptors { + if err := i.BeforeSerialization(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleSerialize(ctx, in) +} + +// InterceptAfterSerialization runs AfterSerializationInterceptors. +type InterceptAfterSerialization struct { + Interceptors []AfterSerializationInterceptor +} + +// ID identifies the middleware. +func (m *InterceptAfterSerialization) ID() string { + return "InterceptAfterSerialization" +} + +// HandleSerialize runs the interceptors. +func (m *InterceptAfterSerialization) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, md middleware.Metadata, err error, +) { + getIctx(ctx).Request = in.Request.(*Request) + + for _, i := range m.Interceptors { + if err := i.AfterSerialization(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleSerialize(ctx, in) +} + +// InterceptBeforeRetryLoop runs BeforeRetryLoopInterceptors. +type InterceptBeforeRetryLoop struct { + Interceptors []BeforeRetryLoopInterceptor +} + +// ID identifies the middleware. +func (m *InterceptBeforeRetryLoop) ID() string { + return "InterceptBeforeRetryLoop" +} + +// HandleFinalize runs the interceptors. +func (m *InterceptBeforeRetryLoop) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.Interceptors { + if err := i.BeforeRetryLoop(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleFinalize(ctx, in) +} + +// InterceptBeforeSigning runs BeforeSigningInterceptors. +type InterceptBeforeSigning struct { + Interceptors []BeforeSigningInterceptor +} + +// ID identifies the middleware. +func (m *InterceptBeforeSigning) ID() string { + return "InterceptBeforeSigning" +} + +// HandleFinalize runs the interceptors. 
+func (m *InterceptBeforeSigning) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.Interceptors { + if err := i.BeforeSigning(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleFinalize(ctx, in) +} + +// InterceptAfterSigning runs AfterSigningInterceptors. +type InterceptAfterSigning struct { + Interceptors []AfterSigningInterceptor +} + +// ID identifies the middleware. +func (m *InterceptAfterSigning) ID() string { + return "InterceptAfterSigning" +} + +// HandleFinalize runs the interceptors. +func (m *InterceptAfterSigning) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.Interceptors { + if err := i.AfterSigning(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleFinalize(ctx, in) +} + +// InterceptTransmit runs BeforeTransmitInterceptors and AfterTransmitInterceptors. +type InterceptTransmit struct { + BeforeTransmit []BeforeTransmitInterceptor + AfterTransmit []AfterTransmitInterceptor +} + +// ID identifies the middleware. +func (m *InterceptTransmit) ID() string { + return "InterceptTransmit" +} + +// HandleDeserialize runs the interceptors. +func (m *InterceptTransmit) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.BeforeTransmit { + if err := i.BeforeTransmit(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + out, md, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, md, err + } + + // the root of the decorated middleware guarantees this will be here + // (client.go: ClientHandler.Handle) + getIctx(ctx).Response = out.RawResponse.(*Response) + + for _, i := range m.AfterTransmit { + if err := i.AfterTransmit(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} + +// InterceptBeforeDeserialization runs BeforeDeserializationInterceptors. +type InterceptBeforeDeserialization struct { + Interceptors []BeforeDeserializationInterceptor +} + +// ID identifies the middleware. +func (m *InterceptBeforeDeserialization) ID() string { + return "InterceptBeforeDeserialization" +} + +// HandleDeserialize runs the interceptors. +func (m *InterceptBeforeDeserialization) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, md middleware.Metadata, err error, +) { + out, md, err = next.HandleDeserialize(ctx, in) + if err != nil { + var terr *RequestSendError + if errors.As(err, &terr) { + return out, md, err + } + } + + for _, i := range m.Interceptors { + if err := i.BeforeDeserialization(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} + +// InterceptAfterDeserialization runs AfterDeserializationInterceptors. +type InterceptAfterDeserialization struct { + Interceptors []AfterDeserializationInterceptor +} + +// ID identifies the middleware. +func (m *InterceptAfterDeserialization) ID() string { + return "InterceptAfterDeserialization" +} + +// HandleDeserialize runs the interceptors. 
+func (m *InterceptAfterDeserialization) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, md middleware.Metadata, err error, +) { + out, md, err = next.HandleDeserialize(ctx, in) + if err != nil { + var terr *RequestSendError + if errors.As(err, &terr) { + return out, md, err + } + } + + getIctx(ctx).Output = out.Result + + for _, i := range m.Interceptors { + if err := i.AfterDeserialization(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} + +// InterceptAttempt runs AfterAttemptInterceptors. +type InterceptAttempt struct { + BeforeAttempt []BeforeAttemptInterceptor + AfterAttempt []AfterAttemptInterceptor +} + +// ID identifies the middleware. +func (m *InterceptAttempt) ID() string { + return "InterceptAttempt" +} + +// HandleFinalize runs the interceptors. +func (m *InterceptAttempt) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.BeforeAttempt { + if err := i.BeforeAttempt(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + out, md, err = next.HandleFinalize(ctx, in) + + for _, i := range m.AfterAttempt { + if err := i.AfterAttempt(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} diff --git a/vendor/github.com/prometheus/otlptranslator/README.md b/vendor/github.com/prometheus/otlptranslator/README.md index 3b31a448eca..b09484e2749 100644 --- a/vendor/github.com/prometheus/otlptranslator/README.md +++ b/vendor/github.com/prometheus/otlptranslator/README.md @@ -1,2 +1,120 @@ -# otlp-prometheus-translator -Library providing API to convert OTLP metric and attribute names to respectively Prometheus metric and label names. +# OTLP Prometheus Translator + +A Go library for converting [OpenTelemetry Protocol (OTLP)](https://opentelemetry.io/docs/specs/otlp/) metric and attribute names to [Prometheus](https://prometheus.io/)-compliant formats. + +Part of the [Prometheus](https://prometheus.io/) ecosystem, following the [OpenTelemetry to Prometheus compatibility specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/compatibility/prometheus_and_openmetrics.md). + +## Features + +- **Metric Name and Label Translation**: Convert OTLP metric names and attributes to Prometheus-compliant format +- **Unit Handling**: Translate OTLP units to Prometheus unit conventions +- **Type-Aware Suffixes**: Optionally append `_total`, `_ratio` based on metric type +- **Namespace Support**: Add configurable namespace prefixes +- **UTF-8 Support**: Choose between Prometheus legacy scheme compliant metric/label names (`[a-zA-Z0-9:_]`) or untranslated metric/label names +- **Translation Strategy Configuration**: Select a translation strategy with a standard set of strings. + +## Installation + +```bash +go get github.com/prometheus/otlptranslator +``` + +## Quick Start + +```go +package main + +import ( + "fmt" + "github.com/prometheus/otlptranslator" +) + +func main() { + // Create a metric namer using traditional Prometheus name translation, with suffixes added and UTF-8 disallowed. 
+ strategy := otlptranslator.UnderscoreEscapingWithSuffixes + namer := otlptranslator.NewMetricNamer("myapp", strategy) + + // Translate OTLP metric to Prometheus format + metric := otlptranslator.Metric{ + Name: "http.server.request.duration", + Unit: "s", + Type: otlptranslator.MetricTypeHistogram, + } + fmt.Println(namer.Build(metric)) // Output: myapp_http_server_request_duration_seconds + + // Translate label names + labelNamer := otlptranslator.LabelNamer{UTF8Allowed: false} + fmt.Println(labelNamer.Build("http.method")) // Output: http_method +} +``` + +## Usage Examples + +### Metric Name Translation + +```go +namer := otlptranslator.MetricNamer{WithMetricSuffixes: true, UTF8Allowed: false} + +// Counter gets _total suffix +counter := otlptranslator.Metric{ + Name: "requests.count", Unit: "1", Type: otlptranslator.MetricTypeMonotonicCounter, +} +fmt.Println(namer.Build(counter)) // requests_count_total + +// Gauge with unit conversion +gauge := otlptranslator.Metric{ + Name: "memory.usage", Unit: "By", Type: otlptranslator.MetricTypeGauge, +} +fmt.Println(namer.Build(gauge)) // memory_usage_bytes + +// Dimensionless gauge gets _ratio suffix +ratio := otlptranslator.Metric{ + Name: "cpu.utilization", Unit: "1", Type: otlptranslator.MetricTypeGauge, +} +fmt.Println(namer.Build(ratio)) // cpu_utilization_ratio +``` + +### Label Translation + +```go +labelNamer := otlptranslator.LabelNamer{UTF8Allowed: false} + +labelNamer.Build("http.method") // http_method +labelNamer.Build("123invalid") // key_123invalid +labelNamer.Build("_private") // key_private +labelNamer.Build("__reserved__") // __reserved__ (preserved) +labelNamer.Build("label@with$symbols") // label_with_symbols +``` + +### Unit Translation + +```go +unitNamer := otlptranslator.UnitNamer{UTF8Allowed: false} + +unitNamer.Build("s") // seconds +unitNamer.Build("By") // bytes +unitNamer.Build("requests/s") // requests_per_second +unitNamer.Build("1") // "" (dimensionless) +``` + +### Configuration Options + +```go +// Prometheus-compliant mode - supports [a-zA-Z0-9:_] +compliantNamer := otlptranslator.MetricNamer{UTF8Allowed: false, WithMetricSuffixes: true} + +// Transparent pass-through mode, aka "NoTranslation" +utf8Namer := otlptranslator.MetricNamer{UTF8Allowed: true, WithMetricSuffixes: false} +utf8Namer = otlptranslator.NewMetricNamer("", otlptranslator.NoTranslation) + +// With namespace and suffixes +productionNamer := otlptranslator.MetricNamer{ + Namespace: "myservice", + WithMetricSuffixes: true, + UTF8Allowed: false, +} +``` + +## License + +Licensed under the Apache License 2.0 - see the [LICENSE](LICENSE) file for details. diff --git a/vendor/github.com/prometheus/otlptranslator/doc.go b/vendor/github.com/prometheus/otlptranslator/doc.go new file mode 100644 index 00000000000..a704d819045 --- /dev/null +++ b/vendor/github.com/prometheus/otlptranslator/doc.go @@ -0,0 +1,24 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+
+// Package otlptranslator provides utilities for converting OpenTelemetry Protocol (OTLP)
+// metric and attribute names to Prometheus-compliant formats.
+//
+// This package is designed to help users translate OpenTelemetry metrics to Prometheus
+// metrics while following the official OpenTelemetry to Prometheus compatibility specification.
+//
+// Main components:
+// - MetricNamer: Translates OTLP metric names to Prometheus metric names
+// - LabelNamer: Translates OTLP attribute names to Prometheus label names
+// - UnitNamer: Translates OTLP units to Prometheus unit conventions
+package otlptranslator
diff --git a/vendor/github.com/prometheus/otlptranslator/label_namer.go b/vendor/github.com/prometheus/otlptranslator/label_namer.go
new file mode 100644
index 00000000000..00072a39e8d
--- /dev/null
+++ b/vendor/github.com/prometheus/otlptranslator/label_namer.go
@@ -0,0 +1,90 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// Provenance-includes-location: https://github.com/prometheus/prometheus/blob/93e991ef7ed19cc997a9360c8016cac3767b8057/storage/remote/otlptranslator/prometheus/normalize_label.go
+// Provenance-includes-license: Apache-2.0
+// Provenance-includes-copyright: Copyright The Prometheus Authors
+// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_label.go
+// Provenance-includes-license: Apache-2.0
+// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
+
+package otlptranslator
+
+import (
+	"fmt"
+	"strings"
+	"unicode"
+)
+
+// LabelNamer is a helper struct to build label names.
+// It translates OpenTelemetry Protocol (OTLP) attribute names to Prometheus-compliant label names.
+//
+// Example usage:
+//
+//	namer := LabelNamer{UTF8Allowed: false}
+//	result, _ := namer.Build("http.method") // "http_method"
+type LabelNamer struct {
+	UTF8Allowed bool
+}
+
+// Build normalizes the specified label to follow the Prometheus label name standard.
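+// It returns an error if normalization would produce an empty or underscore-only name.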
+//
+// Translation rules:
+// - Replaces invalid characters with underscores
+// - Prefixes labels with invalid start characters (numbers or `_`) with "key"
+// - Preserves double underscore labels (reserved names)
+// - If UTF8Allowed is true, returns label as-is
+//
+// Examples:
+//
+//	namer := LabelNamer{UTF8Allowed: false}
+//	namer.Build("http.method")  // "http_method"
+//	namer.Build("123invalid")   // "key_123invalid"
+//	namer.Build("__reserved__") // "__reserved__" (preserved)
+func (ln *LabelNamer) Build(label string) (normalizedName string, err error) {
+	defer func() {
+		if len(normalizedName) == 0 {
+			err = fmt.Errorf("normalization for label name %q resulted in empty name", label)
+			return
+		}
+
+		if ln.UTF8Allowed || normalizedName == label {
+			return
+		}
+
+		// Check that the resulting normalized name contains at least one non-underscore character
+		for _, c := range normalizedName {
+			if c != '_' {
+				return
+			}
+		}
+		err = fmt.Errorf("normalization for label name %q resulted in invalid name %q", label, normalizedName)
+		normalizedName = ""
+	}()
+
+	// Trivial case.
+	if len(label) == 0 || ln.UTF8Allowed {
+		normalizedName = label
+		return
+	}
+
+	normalizedName = sanitizeLabelName(label)
+
+	// If label starts with a number, prepend with "key_".
+	if unicode.IsDigit(rune(normalizedName[0])) {
+		normalizedName = "key_" + normalizedName
+	} else if strings.HasPrefix(normalizedName, "_") && !strings.HasPrefix(normalizedName, "__") {
+		normalizedName = "key" + normalizedName
+	}
+
+	return
+}
diff --git a/vendor/github.com/prometheus/otlptranslator/metric_namer.go b/vendor/github.com/prometheus/otlptranslator/metric_namer.go
index 21c45fcdab8..2e9d6b46fb5 100644
--- a/vendor/github.com/prometheus/otlptranslator/metric_namer.go
+++ b/vendor/github.com/prometheus/otlptranslator/metric_namer.go
@@ -20,6 +20,7 @@ package otlptranslator
 
 import (
+	"fmt"
 	"slices"
 	"strings"
 	"unicode"
@@ -81,13 +82,48 @@ var perUnitMap = map[string]string{
 }
 
 // MetricNamer is a helper struct to build metric names.
+// It converts OpenTelemetry Protocol (OTLP) metric names to Prometheus-compliant metric names.
+//
+// Example usage:
+//
+//	namer := MetricNamer{
+//		WithMetricSuffixes: true,
+//		UTF8Allowed:        false,
+//	}
+//
+//	metric := Metric{
+//		Name: "http.server.duration",
+//		Unit: "s",
+//		Type: MetricTypeHistogram,
+//	}
+//
+//	result, _ := namer.Build(metric) // "http_server_duration_seconds"
 type MetricNamer struct {
 	Namespace          string
 	WithMetricSuffixes bool
 	UTF8Allowed        bool
 }
 
+// NewMetricNamer creates a MetricNamer with the specified namespace (can be
+// blank) and the requested translation strategy.
+func NewMetricNamer(namespace string, strategy TranslationStrategyOption) MetricNamer {
+	return MetricNamer{
+		Namespace:          namespace,
+		WithMetricSuffixes: strategy.ShouldAddSuffixes(),
+		UTF8Allowed:        !strategy.ShouldEscape(),
+	}
+}
+
 // Metric is a helper struct that holds information about a metric.
+// It represents an OpenTelemetry metric with its name, unit, and type.
+//
+// Example:
+//
+//	metric := Metric{
+//		Name: "http.server.request.duration",
+//		Unit: "s",
+//		Type: MetricTypeHistogram,
+//	}
 type Metric struct {
 	Name string
 	Unit string
@@ -96,21 +132,56 @@ type Metric struct {
 
 // Build builds a metric name for the specified metric.
 //
-// If UTF8Allowed is true, the metric name is returned as is, only with the addition of type/unit suffixes and namespace preffix if required.
-// Otherwise the metric name is normalized to be Prometheus-compliant.
-// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels,
-// https://prometheus.io/docs/practices/naming/#metric-and-label-naming
-func (mn *MetricNamer) Build(metric Metric) string {
+// The method applies different transformations based on the MetricNamer configuration:
+// - If UTF8Allowed is true, names are not translated, but all characters must still be valid UTF-8.
+// - If UTF8Allowed is false, metric names are translated to comply with the legacy Prometheus name scheme by escaping invalid characters to `_`.
+// - If WithMetricSuffixes is true, appropriate suffixes are added based on metric type and unit.
+//
+// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
+//
+// Examples:
+//
+//	namer := MetricNamer{WithMetricSuffixes: true, UTF8Allowed: false}
+//
+//	// Counter gets _total suffix
+//	counter := Metric{Name: "requests.count", Unit: "1", Type: MetricTypeMonotonicCounter}
+//	result, _ := namer.Build(counter) // "requests_count_total"
+//
+//	// Gauge with unit suffix
+//	gauge := Metric{Name: "memory.usage", Unit: "By", Type: MetricTypeGauge}
+//	result, _ = namer.Build(gauge) // "memory_usage_bytes"
+func (mn *MetricNamer) Build(metric Metric) (string, error) {
 	if mn.UTF8Allowed {
 		return mn.buildMetricName(metric.Name, metric.Unit, metric.Type)
 	}
 	return mn.buildCompliantMetricName(metric.Name, metric.Unit, metric.Type)
 }
 
-func (mn *MetricNamer) buildCompliantMetricName(name, unit string, metricType MetricType) string {
+func (mn *MetricNamer) buildCompliantMetricName(name, unit string, metricType MetricType) (normalizedName string, err error) {
+	defer func() {
+		if len(normalizedName) == 0 {
+			err = fmt.Errorf("normalization for metric %q resulted in empty name", name)
+			return
+		}
+
+		if normalizedName == name {
+			return
+		}
+
+		// Check that the resulting normalized name contains at least one non-underscore character
+		for _, c := range normalizedName {
+			if c != '_' {
+				return
+			}
+		}
+		err = fmt.Errorf("normalization for metric %q resulted in invalid name %q", name, normalizedName)
+		normalizedName = ""
+	}()
+
 	// Full normalization following standard Prometheus naming conventions
 	if mn.WithMetricSuffixes {
-		return normalizeName(name, unit, metricType, mn.Namespace)
+		normalizedName = normalizeName(name, unit, metricType, mn.Namespace)
+		return
 	}
 
 	// Simple case (no full normalization, no units, etc.).
@@ -120,7 +191,11 @@ func (mn *MetricNamer) buildCompliantMetricName(name, unit string, metricType Me
 
 	// Namespace?
 	if mn.Namespace != "" {
-		return mn.Namespace + "_" + metricName
+		namespace := strings.Join(strings.FieldsFunc(mn.Namespace, func(r rune) bool {
+			return invalidMetricCharRE.MatchString(string(r))
+		}), "_")
+		normalizedName = namespace + "_" + metricName
+		return
 	}
 
 	// Metric name starts with a digit? Prefix it with an underscore.
@@ -128,7 +203,8 @@ func (mn *MetricNamer) buildCompliantMetricName(name, unit string, metricType Me metricName = "_" + metricName } - return metricName + normalizedName = metricName + return } var ( @@ -240,33 +316,54 @@ func removeItem(slice []string, value string) []string { return newSlice } -func (mn *MetricNamer) buildMetricName(name, unit string, metricType MetricType) string { +func (mn *MetricNamer) buildMetricName(inputName, unit string, metricType MetricType) (name string, err error) { + name = inputName if mn.Namespace != "" { name = mn.Namespace + "_" + name } if mn.WithMetricSuffixes { - mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(unit) - if mainUnitSuffix != "" { - name = name + "_" + mainUnitSuffix - } - if perUnitSuffix != "" { - name = name + "_" + perUnitSuffix - } - - // Append _total for Counters - if metricType == MetricTypeMonotonicCounter { - name += "_total" - } - // Append _ratio for metrics with unit "1" // Some OTel receivers improperly use unit "1" for counters of objects // See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions // Until these issues have been fixed, we're appending `_ratio` for gauges ONLY // Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons) if unit == "1" && metricType == MetricTypeGauge { - name += "_ratio" + name = trimSuffixAndDelimiter(name, "ratio") + defer func() { + name += "_ratio" + }() + } + + // Append _total for Counters. + if metricType == MetricTypeMonotonicCounter { + name = trimSuffixAndDelimiter(name, "total") + defer func() { + name += "_total" + }() + } + + mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(unit) + if perUnitSuffix != "" { + name = trimSuffixAndDelimiter(name, perUnitSuffix) + defer func() { + name = name + "_" + perUnitSuffix + }() } + // We don't need to trim and re-append the suffix here because this is + // the inner-most suffix. + if mainUnitSuffix != "" && !strings.HasSuffix(name, mainUnitSuffix) { + name = name + "_" + mainUnitSuffix + } + } + return +} + +// trimSuffixAndDelimiter trims a suffix, plus one extra character which is +// assumed to be a delimiter. +func trimSuffixAndDelimiter(name, suffix string) string { + if strings.HasSuffix(name, suffix) && len(name) > len(suffix)+1 { + return name[:len(name)-(len(suffix)+1)] } return name } diff --git a/vendor/github.com/prometheus/otlptranslator/normalize_label.go b/vendor/github.com/prometheus/otlptranslator/normalize_label.go deleted file mode 100644 index aa771f7840b..00000000000 --- a/vendor/github.com/prometheus/otlptranslator/normalize_label.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2025 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// Provenance-includes-location: https://github.com/prometheus/prometheus/blob/93e991ef7ed19cc997a9360c8016cac3767b8057/storage/remote/otlptranslator/prometheus/normalize_label.go -// Provenance-includes-license: Apache-2.0 -// Provenance-includes-copyright: Copyright The Prometheus Authors -// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_label.go -// Provenance-includes-license: Apache-2.0 -// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. - -package otlptranslator - -import ( - "strings" - "unicode" -) - -// LabelNamer is a helper struct to build label names. -type LabelNamer struct { - UTF8Allowed bool -} - -// Build normalizes the specified label to follow Prometheus label names standard. -// -// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels. -// -// Labels that start with non-letter rune will be prefixed with "key_". -// An exception is made for double-underscores which are allowed. -// -// If UTF8Allowed is true, the label is returned as is. This option is provided just to -// keep a consistent interface with the MetricNamer. -func (ln *LabelNamer) Build(label string) string { - // Trivial case. - if len(label) == 0 || ln.UTF8Allowed { - return label - } - - label = sanitizeLabelName(label) - - // If label starts with a number, prepend with "key_". - if unicode.IsDigit(rune(label[0])) { - label = "key_" + label - } else if strings.HasPrefix(label, "_") && !strings.HasPrefix(label, "__") { - label = "key" + label - } - - return label -} diff --git a/vendor/github.com/prometheus/otlptranslator/strategy.go b/vendor/github.com/prometheus/otlptranslator/strategy.go new file mode 100644 index 00000000000..20fe0197504 --- /dev/null +++ b/vendor/github.com/prometheus/otlptranslator/strategy.go @@ -0,0 +1,86 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Provenance-includes-location: https://github.com/prometheus/prometheus/blob/3602785a89162ccc99a940fb9d862219a2d02241/config/config.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Copyright The Prometheus Authors + +package otlptranslator + +// TranslationStrategyOption is a constant that defines how metric and label +// names should be handled during translation. The recommended approach is to +// use either UnderscoreEscapingWithSuffixes for full Prometheus-style +// compatibility, or NoTranslation for Otel-style names. +type TranslationStrategyOption string + +var ( + // NoUTF8EscapingWithSuffixes will accept metric/label names as they are. Unit + // and type suffixes may be added to metric names, according to certain rules. + NoUTF8EscapingWithSuffixes TranslationStrategyOption = "NoUTF8EscapingWithSuffixes" + // UnderscoreEscapingWithSuffixes is the default option for translating OTLP + // to Prometheus. 
This option will translate metric name characters that are + // not alphanumerics/underscores/colons to underscores, and label name + // characters that are not alphanumerics/underscores to underscores. Unit and + // type suffixes may be appended to metric names, according to certain rules. + UnderscoreEscapingWithSuffixes TranslationStrategyOption = "UnderscoreEscapingWithSuffixes" + // UnderscoreEscapingWithoutSuffixes translates metric name characters that + // are not alphanumerics/underscores/colons to underscores, and label name + // characters that are not alphanumerics/underscores to underscores, but + // unlike UnderscoreEscapingWithSuffixes it does not append any suffixes to + // the names. + UnderscoreEscapingWithoutSuffixes TranslationStrategyOption = "UnderscoreEscapingWithoutSuffixes" + // NoTranslation (EXPERIMENTAL): disables all translation of incoming metric + // and label names. This offers a way for the OTLP users to use native metric + // names, reducing confusion. + // + // WARNING: This setting has significant known risks and limitations (see + // https://prometheus.io/docs/practices/naming/ for details): * Impaired UX + // when using PromQL in plain YAML (e.g. alerts, rules, dashboard, autoscaling + // configuration). * Series collisions which in the best case may result in + // OOO errors, in the worst case a silently malformed time series. For + // instance, you may end up in situation of ingesting `foo.bar` series with + // unit `seconds` and a separate series `foo.bar` with unit `milliseconds`. + // + // As a result, this setting is experimental and currently, should not be used + // in production systems. + // + // TODO(ArthurSens): Mention `type-and-unit-labels` feature + // (https://github.com/prometheus/proposals/pull/39) once released, as + // potential mitigation of the above risks. + NoTranslation TranslationStrategyOption = "NoTranslation" +) + +// ShouldEscape returns true if the translation strategy requires that metric +// names be escaped. +func (o TranslationStrategyOption) ShouldEscape() bool { + switch o { + case UnderscoreEscapingWithSuffixes, UnderscoreEscapingWithoutSuffixes: + return true + case NoTranslation, NoUTF8EscapingWithSuffixes: + return false + default: + return false + } +} + +// ShouldAddSuffixes returns a bool deciding whether the given translation +// strategy should have suffixes added. +func (o TranslationStrategyOption) ShouldAddSuffixes() bool { + switch o { + case UnderscoreEscapingWithSuffixes, NoUTF8EscapingWithSuffixes: + return true + case UnderscoreEscapingWithoutSuffixes, NoTranslation: + return false + default: + return false + } +} diff --git a/vendor/github.com/prometheus/otlptranslator/unit_namer.go b/vendor/github.com/prometheus/otlptranslator/unit_namer.go index 4bbf93ef97c..bb41fa89e57 100644 --- a/vendor/github.com/prometheus/otlptranslator/unit_namer.go +++ b/vendor/github.com/prometheus/otlptranslator/unit_namer.go @@ -15,14 +15,34 @@ package otlptranslator import "strings" // UnitNamer is a helper for building compliant unit names. +// It processes OpenTelemetry Protocol (OTLP) unit strings and converts them +// to Prometheus-compliant unit names. +// +// Example usage: +// +// namer := UnitNamer{UTF8Allowed: false} +// result := namer.Build("s") // "seconds" +// result = namer.Build("By/s") // "bytes_per_second" type UnitNamer struct { UTF8Allowed bool } // Build builds a unit name for the specified unit string. 
 // It processes the unit by splitting it into main and per components,
-// applying appropriate unit mappings, and cleaning up invalid characters
-// when the whole UTF-8 character set is not allowed.
+// applying unit mappings, and cleaning up invalid characters when UTF8Allowed is false.
+//
+// Unit mappings include:
+// - Time: s→seconds, ms→milliseconds, h→hours
+// - Bytes: By→bytes, KBy→kilobytes, MBy→megabytes
+// - SI: m→meters, V→volts, W→watts
+// - Special: 1→"" (empty), %→percent
+//
+// Examples:
+//
+//	namer := UnitNamer{UTF8Allowed: false}
+//	namer.Build("s")          // "seconds"
+//	namer.Build("requests/s") // "requests_per_second"
+//	namer.Build("1")          // "" (dimensionless)
 func (un *UnitNamer) Build(unit string) string {
 	mainUnit, perUnit := buildUnitSuffixes(unit)
 	if !un.UTF8Allowed {
diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go
index 7099ba325ab..64dae3e8ace 100644
--- a/vendor/github.com/prometheus/prometheus/config/config.go
+++ b/vendor/github.com/prometheus/prometheus/config/config.go
@@ -31,6 +31,7 @@ import (
 	"github.com/grafana/regexp"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/otlptranslator"
 	"github.com/prometheus/sigv4"
 	"gopkg.in/yaml.v2"
 
@@ -104,9 +105,9 @@ func Load(s string, logger *slog.Logger) (*Config, error) {
 	}
 
 	switch cfg.OTLPConfig.TranslationStrategy {
-	case UnderscoreEscapingWithSuffixes, UnderscoreEscapingWithoutSuffixes:
+	case otlptranslator.UnderscoreEscapingWithSuffixes, otlptranslator.UnderscoreEscapingWithoutSuffixes:
 	case "":
-	case NoTranslation, NoUTF8EscapingWithSuffixes:
+	case otlptranslator.NoTranslation, otlptranslator.NoUTF8EscapingWithSuffixes:
 		if cfg.GlobalConfig.MetricNameValidationScheme == model.LegacyValidation {
 			return nil, fmt.Errorf("OTLP translation strategy %q is not allowed when UTF8 is disabled", cfg.OTLPConfig.TranslationStrategy)
 		}
@@ -257,7 +258,7 @@ var (
 
 	// DefaultOTLPConfig is the default OTLP configuration.
 	DefaultOTLPConfig = OTLPConfig{
-		TranslationStrategy: UnderscoreEscapingWithSuffixes,
+		TranslationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes,
 	}
 )
 
@@ -884,8 +885,10 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
 		return fmt.Errorf("unknown global name validation method specified, must be either '', 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme)
 	}
 	// Scrapeconfig validation scheme matches global if left blank.
+	localValidationUnset := false
 	switch c.MetricNameValidationScheme {
 	case model.UnsetValidation:
+		localValidationUnset = true
 		c.MetricNameValidationScheme = globalConfig.MetricNameValidationScheme
 	case model.LegacyValidation, model.UTF8Validation:
 	default:
@@ -905,8 +908,20 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
 		return fmt.Errorf("unknown global name escaping method specified, must be one of '%s', '%s', '%s', or '%s', got %q", model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues, globalConfig.MetricNameEscapingScheme)
 	}
 
+	// Similarly, if the ScrapeConfig escaping scheme is blank, infer it from the
+	// ScrapeConfig validation scheme if that was set, or fall back to the global
+	// escaping scheme if the ScrapeConfig validation scheme was also unset. This
+	// ensures that local ScrapeConfigs that only specify legacy validation do not
+	// inherit the global AllowUTF8 escaping setting, which would be an error.
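+	// For example, if the global config uses UTF8Validation together with the
+	// AllowUTF8 escaping scheme and a ScrapeConfig sets only LegacyValidation,
+	// the local escaping scheme becomes EscapeUnderscores instead of AllowUTF8.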
if c.MetricNameEscapingScheme == "" { - c.MetricNameEscapingScheme = globalConfig.MetricNameEscapingScheme + //nolint:gocritic + if localValidationUnset { + c.MetricNameEscapingScheme = globalConfig.MetricNameEscapingScheme + } else if c.MetricNameValidationScheme == model.LegacyValidation { + c.MetricNameEscapingScheme = model.EscapeUnderscores + } else { + c.MetricNameEscapingScheme = model.AllowUTF8 + } } switch c.MetricNameEscapingScheme { @@ -1531,79 +1546,14 @@ func getGoGC() int { return DefaultGoGCPercentage } -type translationStrategyOption string - -var ( - // NoUTF8EscapingWithSuffixes will accept metric/label names as they are. Unit - // and type suffixes may be added to metric names, according to certain rules. - NoUTF8EscapingWithSuffixes translationStrategyOption = "NoUTF8EscapingWithSuffixes" - // UnderscoreEscapingWithSuffixes is the default option for translating OTLP - // to Prometheus. This option will translate metric name characters that are - // not alphanumerics/underscores/colons to underscores, and label name - // characters that are not alphanumerics/underscores to underscores. Unit and - // type suffixes may be appended to metric names, according to certain rules. - UnderscoreEscapingWithSuffixes translationStrategyOption = "UnderscoreEscapingWithSuffixes" - // UnderscoreEscapingWithoutSuffixes translates metric name characters that - // are not alphanumerics/underscores/colons to underscores, and label name - // characters that are not alphanumerics/underscores to underscores, but - // unlike UnderscoreEscapingWithSuffixes it does not append any suffixes to - // the names. - UnderscoreEscapingWithoutSuffixes translationStrategyOption = "UnderscoreEscapingWithoutSuffixes" - // NoTranslation (EXPERIMENTAL): disables all translation of incoming metric - // and label names. This offers a way for the OTLP users to use native metric - // names, reducing confusion. - // - // WARNING: This setting has significant known risks and limitations (see - // https://prometheus.io/docs/practices/naming/ for details): * Impaired UX - // when using PromQL in plain YAML (e.g. alerts, rules, dashboard, autoscaling - // configuration). * Series collisions which in the best case may result in - // OOO errors, in the worst case a silently malformed time series. For - // instance, you may end up in situation of ingesting `foo.bar` series with - // unit `seconds` and a separate series `foo.bar` with unit `milliseconds`. - // - // As a result, this setting is experimental and currently, should not be used - // in production systems. - // - // TODO(ArthurSens): Mention `type-and-unit-labels` feature - // (https://github.com/prometheus/proposals/pull/39) once released, as - // potential mitigation of the above risks. - NoTranslation translationStrategyOption = "NoTranslation" -) - -// ShouldEscape returns true if the translation strategy requires that metric -// names be escaped. -func (o translationStrategyOption) ShouldEscape() bool { - switch o { - case UnderscoreEscapingWithSuffixes, UnderscoreEscapingWithoutSuffixes: - return true - case NoTranslation, NoUTF8EscapingWithSuffixes: - return false - default: - return false - } -} - -// ShouldAddSuffixes returns a bool deciding whether the given translation -// strategy should have suffixes added. 
-func (o translationStrategyOption) ShouldAddSuffixes() bool { - switch o { - case UnderscoreEscapingWithSuffixes, NoUTF8EscapingWithSuffixes: - return true - case UnderscoreEscapingWithoutSuffixes, NoTranslation: - return false - default: - return false - } -} - // OTLPConfig is the configuration for writing to the OTLP endpoint. type OTLPConfig struct { - PromoteAllResourceAttributes bool `yaml:"promote_all_resource_attributes,omitempty"` - PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"` - IgnoreResourceAttributes []string `yaml:"ignore_resource_attributes,omitempty"` - TranslationStrategy translationStrategyOption `yaml:"translation_strategy,omitempty"` - KeepIdentifyingResourceAttributes bool `yaml:"keep_identifying_resource_attributes,omitempty"` - ConvertHistogramsToNHCB bool `yaml:"convert_histograms_to_nhcb,omitempty"` + PromoteAllResourceAttributes bool `yaml:"promote_all_resource_attributes,omitempty"` + PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"` + IgnoreResourceAttributes []string `yaml:"ignore_resource_attributes,omitempty"` + TranslationStrategy otlptranslator.TranslationStrategyOption `yaml:"translation_strategy,omitempty"` + KeepIdentifyingResourceAttributes bool `yaml:"keep_identifying_resource_attributes,omitempty"` + ConvertHistogramsToNHCB bool `yaml:"convert_histograms_to_nhcb,omitempty"` // PromoteScopeMetadata controls whether to promote OTel scope metadata (i.e. name, version, schema URL, and attributes) to metric labels. // As per OTel spec, the aforementioned scope metadata should be identifying, i.e. made into metric labels. PromoteScopeMetadata bool `yaml:"promote_scope_metadata,omitempty"` diff --git a/vendor/github.com/prometheus/prometheus/discovery/discovery.go b/vendor/github.com/prometheus/prometheus/discovery/discovery.go index c400de3632f..2efffd0e194 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/discovery.go +++ b/vendor/github.com/prometheus/prometheus/discovery/discovery.go @@ -148,7 +148,7 @@ func (c StaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) { // NewDiscovererMetrics returns NoopDiscovererMetrics because no metrics are // needed for this service discovery mechanism. 
-func (c StaticConfig) NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics { +func (StaticConfig) NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics { return &NoopDiscovererMetrics{} } diff --git a/vendor/github.com/prometheus/prometheus/discovery/metrics_k8s_client.go b/vendor/github.com/prometheus/prometheus/discovery/metrics_k8s_client.go index c13ce533178..19dfd4e2479 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/metrics_k8s_client.go +++ b/vendor/github.com/prometheus/prometheus/discovery/metrics_k8s_client.go @@ -176,27 +176,27 @@ func (f *clientGoWorkqueueMetricsProvider) RegisterWithK8sGoClient() { workqueue.SetProvider(f) } -func (f *clientGoWorkqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric { +func (*clientGoWorkqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric { return clientGoWorkqueueDepthMetricVec.WithLabelValues(name) } -func (f *clientGoWorkqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric { +func (*clientGoWorkqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric { return clientGoWorkqueueAddsMetricVec.WithLabelValues(name) } -func (f *clientGoWorkqueueMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric { +func (*clientGoWorkqueueMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric { return clientGoWorkqueueLatencyMetricVec.WithLabelValues(name) } -func (f *clientGoWorkqueueMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { +func (*clientGoWorkqueueMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { return clientGoWorkqueueWorkDurationMetricVec.WithLabelValues(name) } -func (f *clientGoWorkqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { +func (*clientGoWorkqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { return clientGoWorkqueueUnfinishedWorkSecondsMetricVec.WithLabelValues(name) } -func (f *clientGoWorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric { +func (*clientGoWorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric { return clientGoWorkqueueLongestRunningProcessorMetricVec.WithLabelValues(name) } diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_slicelabels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_slicelabels.go index a6e5654fa70..099603a9a8d 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels_slicelabels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_slicelabels.go @@ -453,7 +453,7 @@ func NewScratchBuilder(n int) ScratchBuilder { } // NewBuilderWithSymbolTable creates a Builder, for api parity with dedupelabels. 
-func NewBuilderWithSymbolTable(_ *SymbolTable) *Builder { +func NewBuilderWithSymbolTable(*SymbolTable) *Builder { return NewBuilder(EmptyLabels()) } @@ -462,7 +462,7 @@ func NewScratchBuilderWithSymbolTable(_ *SymbolTable, n int) ScratchBuilder { return NewScratchBuilder(n) } -func (b *ScratchBuilder) SetSymbolTable(_ *SymbolTable) { +func (b *ScratchBuilder) SetSymbolTable(*SymbolTable) { // no-op } diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go index 4b9bfd15afb..8743c0149a8 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go @@ -449,11 +449,11 @@ func (ls Labels) DropReserved(shouldDropFn func(name string) bool) Labels { } // InternStrings is a no-op because it would only save when the whole set of labels is identical. -func (ls *Labels) InternStrings(_ func(string) string) { +func (*Labels) InternStrings(func(string) string) { } // ReleaseStrings is a no-op for the same reason as InternStrings. -func (ls Labels) ReleaseStrings(_ func(string)) { +func (Labels) ReleaseStrings(func(string)) { } // Builder allows modifying Labels. @@ -664,10 +664,10 @@ type SymbolTable struct{} func NewSymbolTable() *SymbolTable { return nil } -func (t *SymbolTable) Len() int { return 0 } +func (*SymbolTable) Len() int { return 0 } // NewBuilderWithSymbolTable creates a Builder, for api parity with dedupelabels. -func NewBuilderWithSymbolTable(_ *SymbolTable) *Builder { +func NewBuilderWithSymbolTable(*SymbolTable) *Builder { return NewBuilder(EmptyLabels()) } @@ -676,7 +676,7 @@ func NewScratchBuilderWithSymbolTable(_ *SymbolTable, n int) ScratchBuilder { return NewScratchBuilder(n) } -func (b *ScratchBuilder) SetSymbolTable(_ *SymbolTable) { +func (*ScratchBuilder) SetSymbolTable(*SymbolTable) { // no-op } diff --git a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go index 1636aacc21d..6838e094f1a 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go @@ -695,7 +695,7 @@ func (m *literalSuffixStringMatcher) Matches(s string) bool { // emptyStringMatcher matches an empty string. type emptyStringMatcher struct{} -func (m emptyStringMatcher) Matches(s string) bool { +func (emptyStringMatcher) Matches(s string) bool { return len(s) == 0 } @@ -756,7 +756,7 @@ func (m *equalMultiStringSliceMatcher) add(s string) { m.values = append(m.values, s) } -func (m *equalMultiStringSliceMatcher) addPrefix(_ string, _ bool, _ StringMatcher) { +func (*equalMultiStringSliceMatcher) addPrefix(string, bool, StringMatcher) { panic("not implemented") } @@ -897,7 +897,7 @@ func toNormalisedLowerSlow(s string, i int, a []byte) string { // (including an empty one) as far as it doesn't contain any newline character. type anyStringWithoutNewlineMatcher struct{} -func (m anyStringWithoutNewlineMatcher) Matches(s string) bool { +func (anyStringWithoutNewlineMatcher) Matches(s string) bool { // We need to make sure it doesn't contain a newline. Since the newline is // an ASCII character, we can use strings.IndexByte(). return strings.IndexByte(s, '\n') == -1 @@ -947,7 +947,7 @@ func (m *zeroOrOneCharacterStringMatcher) Matches(s string) bool { // trueMatcher is a stringMatcher which matches any string (always returns true). 
type trueMatcher struct{} -func (m trueMatcher) Matches(_ string) bool { +func (trueMatcher) Matches(string) bool { return true } diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go index d9c37a78b72..abbc8c5a66a 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go @@ -172,7 +172,7 @@ func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) { // Histogram returns (nil, nil, nil, nil) for now because OpenMetrics does not // support sparse histograms yet. -func (p *OpenMetricsParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { +func (*OpenMetricsParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { return nil, nil, nil, nil } @@ -183,7 +183,7 @@ func (p *OpenMetricsParser) Help() ([]byte, []byte) { m := p.l.b[p.offsets[0]:p.offsets[1]] // Replacer causes allocations. Replace only when necessary. - if strings.IndexByte(yoloString(p.text), byte('\\')) >= 0 { + if bytes.IndexByte(p.text, byte('\\')) >= 0 { // OpenMetrics always uses the Prometheus format label value escaping. return m, []byte(lvalReplacer.Replace(string(p.text))) } diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go index 5ca61d1972c..6c782464a29 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go @@ -17,6 +17,7 @@ package textparse import ( + "bytes" "errors" "fmt" "io" @@ -188,7 +189,7 @@ func (p *PromParser) Series() ([]byte, *int64, float64) { // Histogram returns (nil, nil, nil, nil) for now because the Prometheus text // format does not support sparse histograms yet. -func (p *PromParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { +func (*PromParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { return nil, nil, nil, nil } @@ -199,7 +200,7 @@ func (p *PromParser) Help() ([]byte, []byte) { m := p.l.b[p.offsets[0]:p.offsets[1]] // Replacer causes allocations. Replace only when necessary. - if strings.IndexByte(yoloString(p.text), byte('\\')) >= 0 { + if bytes.IndexByte(p.text, byte('\\')) >= 0 { return m, []byte(helpReplacer.Replace(string(p.text))) } return m, p.text @@ -215,7 +216,7 @@ func (p *PromParser) Type() ([]byte, model.MetricType) { // Unit returns the metric name and unit in the current entry. // Must only be called after Next returned a unit entry. // The returned byte slices become invalid after the next call to Next. -func (p *PromParser) Unit() ([]byte, []byte) { +func (*PromParser) Unit() ([]byte, []byte) { // The Prometheus format does not have units. return nil, nil } @@ -269,13 +270,13 @@ func (p *PromParser) Labels(l *labels.Labels) { // Exemplar implements the Parser interface. However, since the classic // Prometheus text format does not support exemplars, this implementation simply // returns false and does nothing else. -func (p *PromParser) Exemplar(*exemplar.Exemplar) bool { +func (*PromParser) Exemplar(*exemplar.Exemplar) bool { return false } // CreatedTimestamp returns 0 as it's not implemented yet. 
 // TODO(bwplotka): https://github.com/prometheus/prometheus/issues/12980
-func (p *PromParser) CreatedTimestamp() int64 {
+func (*PromParser) CreatedTimestamp() int64 {
 	return 0
 }
 
diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go
index 2ca6c03af71..b5060a1f33b 100644
--- a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go
+++ b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go
@@ -299,7 +299,7 @@ func (p *ProtobufParser) Unit() ([]byte, []byte) {
 
 // Comment always returns nil because comments aren't supported by the protobuf
 // format.
-func (p *ProtobufParser) Comment() []byte {
+func (*ProtobufParser) Comment() []byte {
 	return nil
 }
 
diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/decoder.go b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/decoder.go
index d4fb4204cae..8913eddc190 100644
--- a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/decoder.go
+++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/decoder.go
@@ -62,6 +62,9 @@ func NewMetricStreamingDecoder(data []byte) *MetricStreamingDecoder {
 
 var errInvalidVarint = errors.New("clientpb: invalid varint encountered")
 
+// NextMetricFamily decodes the next metric family from the input, without its metrics.
+// Use NextMetric() to decode metrics. The MetricFamily fields Name, Help and Unit
+// are only valid until NextMetricFamily is called again.
 func (m *MetricStreamingDecoder) NextMetricFamily() error {
 	b := m.in[m.inPos:]
 	if len(b) == 0 {
@@ -143,16 +146,17 @@ func (m *MetricStreamingDecoder) resetMetric() {
 	}
 }
 
-func (m *MetricStreamingDecoder) GetMetric() {
+func (*MetricStreamingDecoder) GetMetric() {
 	panic("don't use GetMetric, use Metric directly")
 }
 
-func (m *MetricStreamingDecoder) GetLabel() {
+func (*MetricStreamingDecoder) GetLabel() {
 	panic("don't use GetLabel, use Label instead")
 }
 
 type scratchBuilder interface {
 	Add(name, value string)
+	UnsafeAddBytes(name, value []byte)
 }
 
 // Label parses labels into labels scratch builder. Metric name is missing
@@ -170,9 +174,9 @@ func (m *MetricStreamingDecoder) Label(b scratchBuilder) error {
 }
 
 // parseLabel is essentially LabelPair.Unmarshal but directly adding into scratch builder
-// and reusing strings.
+// via the UnsafeAddBytes method to reuse strings.
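+// The name and value byte slices handed to UnsafeAddBytes alias the input
+// buffer dAtA, so no per-label string copy is made.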
func parseLabel(dAtA []byte, b scratchBuilder) error { - var name, value string + var name, value []byte l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -231,7 +235,7 @@ func parseLabel(dAtA []byte, b scratchBuilder) error { if postIndex > l { return io.ErrUnexpectedEOF } - name = yoloString(dAtA[iNdEx:postIndex]) + name = dAtA[iNdEx:postIndex] if !model.LabelName(name).IsValid() { return fmt.Errorf("invalid label name: %s", name) } @@ -266,8 +270,8 @@ func parseLabel(dAtA []byte, b scratchBuilder) error { if postIndex > l { return io.ErrUnexpectedEOF } - value = yoloString(dAtA[iNdEx:postIndex]) - if !utf8.ValidString(value) { + value = dAtA[iNdEx:postIndex] + if !utf8.ValidString(yoloString(value)) { return fmt.Errorf("invalid label value: %s", value) } iNdEx = postIndex @@ -289,7 +293,7 @@ func parseLabel(dAtA []byte, b scratchBuilder) error { if iNdEx > l { return io.ErrUnexpectedEOF } - b.Add(name, value) + b.UnsafeAddBytes(name, value) return nil } diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index f5ee591d3b3..866ed279db4 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -1087,7 +1087,7 @@ func (ev *evaluator) errorf(format string, args ...interface{}) { } // error causes a panic with the given error. -func (ev *evaluator) error(err error) { +func (*evaluator) error(err error) { panic(err) } @@ -1228,7 +1228,7 @@ func (enh *EvalNodeHelper) resetHistograms(inVec Vector, arg parser.Expr) annota // function call results. // The prepSeries function (if provided) can be used to prepare the helper // for each series, then passed to each call funcCall. -func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, annotations.Annotations), exprs ...parser.Expr) (Matrix, annotations.Annotations) { +func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]Vector, Matrix, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, annotations.Annotations), exprs ...parser.Expr) (Matrix, annotations.Annotations) { numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 matrixes := make([]Matrix, len(exprs)) origMatrixes := make([]Matrix, len(exprs)) @@ -1250,8 +1250,7 @@ func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Label } } - vectors := make([]Vector, len(exprs)) // Input vectors for the function. - args := make([]parser.Value, len(exprs)) // Argument to function. + vectors := make([]Vector, len(exprs)) // Input vectors for the function. // Create an output vector that is as big as the input matrix with // the most time series. biggestLen := 1 @@ -1305,7 +1304,6 @@ func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Label sh = seriesHelpers[i] } vectors[i], bh = ev.gatherVector(ts, matrixes[i], vectors[i], bh, sh) - args[i] = vectors[i] if prepSeries != nil { bufHelpers[i] = bh } @@ -1313,7 +1311,7 @@ func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Label // Make the function call. enh.Ts = ts - result, ws := funcCall(args, bufHelpers, enh) + result, ws := funcCall(vectors, nil, bufHelpers, enh) enh.Out = result[:0] // Reuse result vector. 
warnings.Merge(ws) @@ -1685,8 +1683,8 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, sortedGrouping = append(sortedGrouping, valueLabel.Val) slices.Sort(sortedGrouping) } - return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return ev.aggregationCountValues(e, sortedGrouping, valueLabel.Val, v[0].(Vector), enh) + return ev.rangeEval(ctx, nil, func(v []Vector, _ Matrix, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.aggregationCountValues(e, sortedGrouping, valueLabel.Val, v[0], enh) }, e.Expr) } @@ -1766,22 +1764,18 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, if !matrixArg { // Does not have a matrix argument. - return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec, annos := call(v, e.Args, enh) + return ev.rangeEval(ctx, nil, func(v []Vector, _ Matrix, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec, annos := call(v, nil, e.Args, enh) return vec, warnings.Merge(annos) }, e.Args...) } - inArgs := make([]parser.Value, len(e.Args)) // Evaluate any non-matrix arguments. - otherArgs := make([]Matrix, len(e.Args)) - otherInArgs := make([]Vector, len(e.Args)) + evalVals := make([]Matrix, len(e.Args)) for i, e := range e.Args { if i != matrixArgIndex { val, ws := ev.eval(ctx, e) - otherArgs[i] = val.(Matrix) - otherInArgs[i] = Vector{Sample{}} - inArgs[i] = otherInArgs[i] + evalVals[i] = val.(Matrix) warnings.Merge(ws) } } @@ -1809,7 +1803,6 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, var histograms []HPoint var prevSS *Series inMatrix := make(Matrix, 1) - inArgs[matrixArgIndex] = inMatrix enh := &EvalNodeHelper{Out: make(Vector, 0, 1), enableDelayedNameRemoval: ev.enableDelayedNameRemoval} // Process all the calls for one time series at a time. it := storage.NewBuffer(selRange) @@ -1820,7 +1813,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, // vector functions, the only change needed is to drop the // metric name in the output. dropName := e.Func.Name != "last_over_time" - + vectorVals := make([]Vector, len(e.Args)-1) for i, s := range selVS.Series { if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) @@ -1848,9 +1841,11 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, // Set the non-matrix arguments. // They are scalar, so it is safe to use the step number // when looking up the argument, as there will be no gaps. + counter := 0 for j := range e.Args { if j != matrixArgIndex { - otherInArgs[j][0].F = otherArgs[j][0].Floats[step].F + vectorVals[counter] = Vector{Sample{F: evalVals[j][0].Floats[step].F}} + counter++ } } // Evaluate the matrix selector for this series @@ -1867,8 +1862,9 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, inMatrix[0].Floats = floats inMatrix[0].Histograms = histograms enh.Ts = ts + // Make the function call. 
- outVec, annos := call(inArgs, e.Args, enh) + outVec, annos := call(vectorVals, inMatrix, e.Args, enh) warnings.Merge(annos) ev.samplesStats.IncrementSamplesAtStep(step, int64(len(floats)+totalHPointSize(histograms))) @@ -1908,7 +1904,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, if ev.enableTypeAndUnitLabels { // When type-and-unit-labels feature is enabled, check __type__ label typeLabel := inMatrix[0].Metric.Get("__type__") - if typeLabel != string(model.MetricTypeCounter) { + if typeLabel != string(model.MetricTypeCounter) && typeLabel != string(model.MetricTypeHistogram) { warnings.Add(annotations.NewPossibleNonCounterLabelInfo(metricName, typeLabel, e.Args[0].PositionRange())) } } else if !strings.HasSuffix(metricName, "_total") && @@ -2002,8 +1998,8 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, case *parser.BinaryExpr: switch lt, rt := e.LHS.Type(), e.RHS.Type(); { case lt == parser.ValueTypeScalar && rt == parser.ValueTypeScalar: - return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - val := scalarBinop(e.Op, v[0].(Vector)[0].F, v[1].(Vector)[0].F) + return ev.rangeEval(ctx, nil, func(v []Vector, _ Matrix, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + val := scalarBinop(e.Op, v[0][0].F, v[1][0].F) return append(enh.Out, Sample{F: val}), nil }, e.LHS, e.RHS) case lt == parser.ValueTypeVector && rt == parser.ValueTypeVector: @@ -2015,40 +2011,40 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, } switch e.Op { case parser.LAND: - return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil + return ev.rangeEval(ctx, initSignatures, func(v []Vector, _ Matrix, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.VectorAnd(v[0], v[1], e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) case parser.LOR: - return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil + return ev.rangeEval(ctx, initSignatures, func(v []Vector, _ Matrix, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.VectorOr(v[0], v[1], e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) case parser.LUNLESS: - return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil + return ev.rangeEval(ctx, initSignatures, func(v []Vector, _ Matrix, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.VectorUnless(v[0], v[1], e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) default: - return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec, err := ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh, e.PositionRange()) + return ev.rangeEval(ctx, initSignatures, func(v []Vector, _ Matrix, sh 
[][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec, err := ev.VectorBinop(e.Op, v[0], v[1], e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh, e.PositionRange()) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar: - return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec, err := ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh, e.PositionRange()) + return ev.rangeEval(ctx, nil, func(v []Vector, _ Matrix, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec, err := ev.VectorscalarBinop(e.Op, v[0], Scalar{V: v[1][0].F}, false, e.ReturnBool, enh, e.PositionRange()) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector: - return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec, err := ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh, e.PositionRange()) + return ev.rangeEval(ctx, nil, func(v []Vector, _ Matrix, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec, err := ev.VectorscalarBinop(e.Op, v[1], Scalar{V: v[0][0].F}, true, e.ReturnBool, enh, e.PositionRange()) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } case *parser.NumberLiteral: span.SetAttributes(attribute.Float64("value", e.Val)) - return ev.rangeEval(ctx, nil, func(_ []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(_ []Vector, _ Matrix, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{F: e.Val, Metric: labels.EmptyLabels()}), nil }) @@ -2219,7 +2215,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Co seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)-1) } - return ev.rangeEval(ctx, nil, func(_ []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(_ []Vector, _ Matrix, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if vs.Timestamp != nil { // This is a special case for "timestamp()" when the @ modifier is used, to ensure that // we return a point for each time step in this case. 
@@ -2248,7 +2244,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Co } } ev.samplesStats.UpdatePeak(ev.currentSamples) - vec, annos := call([]parser.Value{vec}, e.Args, enh) + vec, annos := call([]Vector{vec}, nil, e.Args, enh) return vec, ws.Merge(annos) }) } @@ -2566,7 +2562,7 @@ loop: return floats, histograms } -func (ev *evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector { +func (*evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector { if matching.Card != parser.CardManyToMany { panic("set operations must only use many-to-many matching") } @@ -2590,7 +2586,7 @@ func (ev *evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching, return enh.Out } -func (ev *evaluator) VectorOr(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector { +func (*evaluator) VectorOr(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector { switch { case matching.Card != parser.CardManyToMany: panic("set operations must only use many-to-many matching") @@ -2617,7 +2613,7 @@ func (ev *evaluator) VectorOr(lhs, rhs Vector, matching *parser.VectorMatching, return enh.Out } -func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector { +func (*evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector { if matching.Card != parser.CardManyToMany { panic("set operations must only use many-to-many matching") } @@ -3527,7 +3523,7 @@ seriesLoop: // aggregationCountValues evaluates count_values on vec. // Outputs as many series per group as there are values in the input. -func (ev *evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping []string, valueLabel string, vec Vector, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func (*evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping []string, valueLabel string, vec Vector, enh *EvalNodeHelper) (Vector, annotations.Annotations) { type groupCount struct { labels labels.Labels count int @@ -3611,7 +3607,7 @@ func addToSeries(ss *Series, ts int64, f float64, h *histogram.FloatHistogram, n ss.Histograms = append(ss.Histograms, HPoint{T: ts, H: h}) } -func (ev *evaluator) nextValues(ts int64, series *Series) (f float64, h *histogram.FloatHistogram, b bool) { +func (*evaluator) nextValues(ts int64, series *Series) (f float64, h *histogram.FloatHistogram, b bool) { switch { case len(series.Floats) > 0 && series.Floats[0].T == ts: f = series.Floats[0].F @@ -3926,7 +3922,7 @@ func NewHashRatioSampler() *HashRatioSampler { return &HashRatioSampler{} } -func (s *HashRatioSampler) sampleOffset(_ int64, sample *Sample) float64 { +func (*HashRatioSampler) sampleOffset(_ int64, sample *Sample) float64 { const ( float64MaxUint64 = float64(math.MaxUint64) ) diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index 2577e7f27b5..fe5227312ff 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -56,10 +56,10 @@ import ( // metrics, the timestamp are not needed. 
// // Scalar results should be returned as the value of a sample in a Vector. -type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) +type FunctionCall func(vectorVals []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) // === time() float64 === -func funcTime(_ []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcTime(_ []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return Vector{Sample{ F: float64(enh.Ts) / 1000, }}, nil @@ -69,11 +69,11 @@ func funcTime(_ []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vect // It calculates the rate (allowing for counter resets if isCounter is true), // extrapolates if the first/last sample is close to the boundary, and returns // the result as either per-second (if isRate is true) or overall. -func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) (Vector, annotations.Annotations) { +func extrapolatedRate(vals Matrix, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) (Vector, annotations.Annotations) { ms := args[0].(*parser.MatrixSelector) vs := ms.VectorSelector.(*parser.VectorSelector) var ( - samples = vals[0].(Matrix)[0] + samples = vals[0] rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset) rangeEnd = enh.Ts - durationMilliseconds(vs.Offset) resultFloat float64 @@ -288,33 +288,33 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra } // === delta(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcDelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return extrapolatedRate(vals, args, enh, false, false) +func funcDelta(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return extrapolatedRate(matrixVals, args, enh, false, false) } // === rate(node parser.ValueTypeMatrix) (Vector, Annotations) === -func funcRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return extrapolatedRate(vals, args, enh, true, true) +func funcRate(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return extrapolatedRate(matrixVals, args, enh, true, true) } // === increase(node parser.ValueTypeMatrix) (Vector, Annotations) === -func funcIncrease(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return extrapolatedRate(vals, args, enh, true, false) +func funcIncrease(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return extrapolatedRate(matrixVals, args, enh, true, false) } // === irate(node parser.ValueTypeMatrix) (Vector, Annotations) === -func funcIrate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return instantValue(vals, args, enh.Out, true) +func funcIrate(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return instantValue(matrixVals, args, enh.Out, true) } // === idelta(node model.ValMatrix) (Vector, Annotations) === -func funcIdelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, 
annotations.Annotations) { - return instantValue(vals, args, enh.Out, false) +func funcIdelta(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return instantValue(matrixVals, args, enh.Out, false) } -func instantValue(vals []parser.Value, args parser.Expressions, out Vector, isRate bool) (Vector, annotations.Annotations) { +func instantValue(vals Matrix, args parser.Expressions, out Vector, isRate bool) (Vector, annotations.Annotations) { var ( - samples = vals[0].(Matrix)[0] + samples = vals[0] metricName = samples.Metric.Get(labels.MetricName) ss = make([]Sample, 0, 2) annos annotations.Annotations @@ -441,14 +441,14 @@ func calcTrendValue(i int, tf, s0, s1, b float64) float64 { // affects how trends in historical data will affect the current data. A higher // trend factor increases the influence of trends. Algorithm taken from // https://en.wikipedia.org/wiki/Exponential_smoothing . -func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - samples := vals[0].(Matrix)[0] +func funcDoubleExponentialSmoothing(vectorVals []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + samples := matrixVal[0] metricName := samples.Metric.Get(labels.MetricName) // The smoothing factor argument. - sf := vals[1].(Vector)[0].F + sf := vectorVals[0][0].F // The trend factor argument. - tf := vals[2].(Vector)[0].F + tf := vectorVals[1][0].F // Check that the input parameters are valid. if sf <= 0 || sf >= 1 { @@ -504,27 +504,27 @@ func filterFloats(v Vector) Vector { } // === sort(node parser.ValueTypeVector) (Vector, Annotations) === -func funcSort(vals []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSort(vectorVals []Vector, _ Matrix, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { // NaN should sort to the bottom, so take descending sort with NaN first and // reverse it. - byValueSorter := vectorByReverseValueHeap(filterFloats(vals[0].(Vector))) + byValueSorter := vectorByReverseValueHeap(filterFloats(vectorVals[0])) sort.Sort(sort.Reverse(byValueSorter)) return Vector(byValueSorter), nil } // === sortDesc(node parser.ValueTypeVector) (Vector, Annotations) === -func funcSortDesc(vals []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSortDesc(vectorVals []Vector, _ Matrix, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { // NaN should sort to the bottom, so take ascending sort with NaN first and // reverse it. - byValueSorter := vectorByValueHeap(filterFloats(vals[0].(Vector))) + byValueSorter := vectorByValueHeap(filterFloats(vectorVals[0])) sort.Sort(sort.Reverse(byValueSorter)) return Vector(byValueSorter), nil } // === sort_by_label(vector parser.ValueTypeVector, label parser.ValueTypeString...)
(Vector, Annotations) === -func funcSortByLabel(vals []parser.Value, args parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSortByLabel(vectorVals []Vector, _ Matrix, args parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { lbls := stringSliceFromArgs(args[1:]) - slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { + slices.SortFunc(vectorVals[0], func(a, b Sample) int { for _, label := range lbls { lv1 := a.Metric.Get(label) lv2 := b.Metric.Get(label) @@ -544,13 +544,13 @@ func funcSortByLabel(vals []parser.Value, args parser.Expressions, _ *EvalNodeHe return labels.Compare(a.Metric, b.Metric) }) - return vals[0].(Vector), nil + return vectorVals[0], nil } // === sort_by_label_desc(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) === -func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSortByLabelDesc(vectorVals []Vector, _ Matrix, args parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { lbls := stringSliceFromArgs(args[1:]) - slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { + slices.SortFunc(vectorVals[0], func(a, b Sample) int { for _, label := range lbls { lv1 := a.Metric.Get(label) lv2 := b.Metric.Get(label) @@ -570,7 +570,7 @@ func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, _ *EvalNo return -labels.Compare(a.Metric, b.Metric) }) - return vals[0].(Vector), nil + return vectorVals[0], nil } func clamp(vec Vector, minVal, maxVal float64, enh *EvalNodeHelper) (Vector, annotations.Annotations) { @@ -595,46 +595,46 @@ func clamp(vec Vector, minVal, maxVal float64, enh *EvalNodeHelper) (Vector, ann } // === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) === -func funcClamp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec := vals[0].(Vector) - minVal := vals[1].(Vector)[0].F - maxVal := vals[2].(Vector)[0].F +func funcClamp(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec := vectorVals[0] + minVal := vectorVals[1][0].F + maxVal := vectorVals[2][0].F return clamp(vec, minVal, maxVal, enh) } // === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) === -func funcClampMax(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec := vals[0].(Vector) - maxVal := vals[1].(Vector)[0].F +func funcClampMax(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec := vectorVals[0] + maxVal := vectorVals[1][0].F return clamp(vec, math.Inf(-1), maxVal, enh) } // === clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) === -func funcClampMin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec := vals[0].(Vector) - minVal := vals[1].(Vector)[0].F +func funcClampMin(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec := vectorVals[0] + minVal := vectorVals[1][0].F return clamp(vec, minVal, math.Inf(+1), enh) } // === round(Vector parser.ValueTypeVector, toNearest=1 Scalar) (Vector, Annotations) === -func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcRound(vectorVals []Vector, _ 
Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { // round returns a number rounded to toNearest. // Ties are solved by rounding up. toNearest := float64(1) if len(args) >= 2 { - toNearest = vals[1].(Vector)[0].F + toNearest = vectorVals[1][0].F } // Invert as it seems to cause fewer floating point accuracy issues. toNearestInverse := 1.0 / toNearest - return simpleFloatFunc(vals, enh, func(f float64) float64 { + return simpleFloatFunc(vectorVals, enh, func(f float64) float64 { return math.Floor(f*toNearestInverse+0.5) / toNearestInverse }), nil } // === Scalar(node parser.ValueTypeVector) Scalar === -func funcScalar(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcScalar(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { var ( - v = vals[0].(Vector) + v = vectorVals[0] value float64 found bool ) @@ -656,22 +656,22 @@ func funcScalar(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) return append(enh.Out, Sample{F: value}), nil } -func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) float64) Vector { - el := vals[0].(Matrix)[0] +func aggrOverTime(matrixVal Matrix, enh *EvalNodeHelper, aggrFn func(Series) float64) Vector { + el := matrixVal[0] return append(enh.Out, Sample{F: aggrFn(el)}) } -func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) (*histogram.FloatHistogram, error)) (Vector, error) { - el := vals[0].(Matrix)[0] +func aggrHistOverTime(matrixVal Matrix, enh *EvalNodeHelper, aggrFn func(Series) (*histogram.FloatHistogram, error)) (Vector, error) { + el := matrixVal[0] res, err := aggrFn(el) return append(enh.Out, Sample{H: res}), err } // === avg_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - firstSeries := vals[0].(Matrix)[0] +func funcAvgOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + firstSeries := matrixVal[0] if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 { metricName := firstSeries.Metric.Get(labels.MetricName) return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange())) @@ -700,7 +700,7 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode // the current implementation is accurate enough for practical purposes. if len(firstSeries.Floats) == 0 { // The passed values only contain histograms. - vec, err := aggrHistOverTime(vals, enh, func(s Series) (*histogram.FloatHistogram, error) { + vec, err := aggrHistOverTime(matrixVal, enh, func(s Series) (*histogram.FloatHistogram, error) { mean := s.Histograms[0].H.Copy() for i, h := range s.Histograms[1:] { count := float64(i + 2) @@ -727,7 +727,7 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } return vec, nil } - return aggrOverTime(vals, enh, func(s Series) float64 { + return aggrOverTime(matrixVal, enh, func(s Series) float64 { var ( // Pre-set the 1st sample to start the loop with the 2nd. sum, count = s.Floats[0].F, 1. 
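The *_over_time helpers in the hunks above follow the same pattern: aggrOverTime and aggrHistOverTime now receive the range argument as a typed Matrix instead of extracting it with vals[0].(Matrix). A runnable toy version of that helper shape, again with stub types invented purely for illustration:

package main

import "fmt"

type FPoint struct {
	T int64
	F float64
}
type Series struct{ Floats []FPoint }
type Matrix []Series

// Toy version of the new aggrOverTime shape: the single range-vector argument
// arrives as a Matrix whose one series is aggregated by aggrFn.
func aggrOverTime(matrixVal Matrix, aggrFn func(Series) float64) float64 {
	return aggrFn(matrixVal[0])
}

func main() {
	m := Matrix{{Floats: []FPoint{{T: 1000, F: 1}, {T: 2000, F: 2}, {T: 3000, F: 4}}}}
	// count_over_time-style aggregation:
	count := aggrOverTime(m, func(s Series) float64 { return float64(len(s.Floats)) })
	// sum_over_time-style aggregation:
	sum := aggrOverTime(m, func(s Series) float64 {
		var total float64
		for _, p := range s.Floats {
			total += p.F
		}
		return total
	})
	fmt.Println(count, sum) // 3 7
}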
@@ -761,15 +761,15 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } // === count_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcCountOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return aggrOverTime(vals, enh, func(s Series) float64 { +func funcCountOverTime(_ []Vector, matrixVals Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return aggrOverTime(matrixVals, enh, func(s Series) float64 { return float64(len(s.Floats) + len(s.Histograms)) }), nil } // === last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcLastOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - el := vals[0].(Matrix)[0] +func funcLastOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + el := matrixVal[0] var f FPoint if len(el.Floats) > 0 { @@ -794,8 +794,8 @@ func funcLastOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHe } // === mad_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - samples := vals[0].(Matrix)[0] +func funcMadOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + samples := matrixVal[0] var annos annotations.Annotations if len(samples.Floats) == 0 { return enh.Out, nil @@ -804,7 +804,7 @@ func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode metricName := samples.Metric.Get(labels.MetricName) annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) } - return aggrOverTime(vals, enh, func(s Series) float64 { + return aggrOverTime(matrixVal, enh, func(s Series) float64 { values := make(vectorByValueHeap, 0, len(s.Floats)) for _, f := range s.Floats { values = append(values, Sample{F: f.F}) @@ -819,8 +819,8 @@ func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } // === ts_of_last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcTsOfLastOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - el := vals[0].(Matrix)[0] +func funcTsOfLastOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + el := matrixVal[0] var tf int64 if len(el.Floats) > 0 { @@ -839,22 +839,22 @@ func funcTsOfLastOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNo } // === ts_of_max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcTsOfMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return compareOverTime(vals, args, enh, func(cur, maxVal float64) bool { +func funcTsOfMaxOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return compareOverTime(matrixVal, args, enh, func(cur, maxVal float64) bool { return (cur >= maxVal) || math.IsNaN(maxVal) }, true) } // === ts_of_min_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcTsOfMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return compareOverTime(vals,
args, enh, func(cur, maxVal float64) bool { +func funcTsOfMinOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return compareOverTime(matrixVals, args, enh, func(cur, maxVal float64) bool { return (cur <= maxVal) || math.IsNaN(maxVal) }, true) } // compareOverTime is a helper used by funcMaxOverTime and funcMinOverTime. -func compareOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, compareFn func(float64, float64) bool, returnTimestamp bool) (Vector, annotations.Annotations) { - samples := vals[0].(Matrix)[0] +func compareOverTime(matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper, compareFn func(float64, float64) bool, returnTimestamp bool) (Vector, annotations.Annotations) { + samples := matrixVal[0] var annos annotations.Annotations if len(samples.Floats) == 0 { return enh.Out, nil @@ -863,7 +863,7 @@ func compareOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode metricName := samples.Metric.Get(labels.MetricName) annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) } - return aggrOverTime(vals, enh, func(s Series) float64 { + return aggrOverTime(matrixVal, enh, func(s Series) float64 { maxVal := s.Floats[0].F tsOfMax := s.Floats[0].T for _, f := range s.Floats { @@ -880,29 +880,29 @@ func compareOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } // === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return compareOverTime(vals, args, enh, func(cur, maxVal float64) bool { +func funcMaxOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return compareOverTime(matrixVals, args, enh, func(cur, maxVal float64) bool { return (cur > maxVal) || math.IsNaN(maxVal) }, false) } // === min_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return compareOverTime(vals, args, enh, func(cur, maxVal float64) bool { +func funcMinOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return compareOverTime(matrixVals, args, enh, func(cur, maxVal float64) bool { return (cur < maxVal) || math.IsNaN(maxVal) }, false) } // === sum_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - firstSeries := vals[0].(Matrix)[0] +func funcSumOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + firstSeries := matrixVal[0] if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 { metricName := firstSeries.Metric.Get(labels.MetricName) return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange())) } if len(firstSeries.Floats) == 0 { // The passed values only contain histograms. 
- vec, err := aggrHistOverTime(vals, enh, func(s Series) (*histogram.FloatHistogram, error) { + vec, err := aggrHistOverTime(matrixVal, enh, func(s Series) (*histogram.FloatHistogram, error) { sum := s.Histograms[0].H.Copy() for _, h := range s.Histograms[1:] { _, err := sum.Add(h.H) @@ -922,7 +922,7 @@ func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } return vec, nil } - return aggrOverTime(vals, enh, func(s Series) float64 { + return aggrOverTime(matrixVal, enh, func(s Series) float64 { var sum, c float64 for _, f := range s.Floats { sum, c = kahanSumInc(f.F, sum, c) @@ -935,9 +935,9 @@ func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } // === quantile_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - q := vals[0].(Vector)[0].F - el := vals[1].(Matrix)[0] +func funcQuantileOverTime(vectorVals []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + q := vectorVals[0][0].F + el := matrixVal[0] if len(el.Floats) == 0 { return enh.Out, nil } @@ -957,8 +957,8 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva return append(enh.Out, Sample{F: quantile(q, values)}), annos } -func varianceOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) { - samples := vals[0].(Matrix)[0] +func varianceOverTime(matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) { + samples := matrixVal[0] var annos annotations.Annotations if len(samples.Floats) == 0 { return enh.Out, nil @@ -967,7 +967,7 @@ func varianceOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNod metricName := samples.Metric.Get(labels.MetricName) annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) } - return aggrOverTime(vals, enh, func(s Series) float64 { + return aggrOverTime(matrixVal, enh, func(s Series) float64 { var count float64 var mean, cMean float64 var aux, cAux float64 @@ -986,18 +986,18 @@ func varianceOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNod } // === stddev_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return varianceOverTime(vals, args, enh, math.Sqrt) +func funcStddevOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return varianceOverTime(matrixVals, args, enh, math.Sqrt) } // === stdvar_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return varianceOverTime(vals, args, enh, nil) +func funcStdvarOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return varianceOverTime(matrixVals, args, enh, nil) } // === absent(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - if len(vals[0].(Vector)) > 0 { +func 
funcAbsent(vectorVals []Vector, _ Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + if len(vectorVals[0]) > 0 { return enh.Out, nil } return append(enh.Out, @@ -1012,19 +1012,19 @@ func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe // This function will return 1 if the matrix has at least one element. // Due to engine optimization, this function is only called when this condition is true. // Then, the engine post-processes the results to get the expected output. -func funcAbsentOverTime(_ []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcAbsentOverTime(_ []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{F: 1}), nil } // === present_over_time(Vector parser.ValueTypeMatrix) (Vector, Annotations) === -func funcPresentOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return aggrOverTime(vals, enh, func(_ Series) float64 { +func funcPresentOverTime(_ []Vector, matrixVals Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return aggrOverTime(matrixVals, enh, func(Series) float64 { return 1 }), nil } -func simpleFloatFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector { - for _, el := range vals[0].(Vector) { +func simpleFloatFunc(vectorVals []Vector, enh *EvalNodeHelper, f func(float64) float64) Vector { + for _, el := range vectorVals[0] { if el.H == nil { // Process only float samples. if !enh.enableDelayedNameRemoval { el.Metric = el.Metric.DropReserved(schema.IsMetadataLabel) @@ -1040,127 +1040,127 @@ func simpleFloatFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) f } // === abs(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAbs(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Abs), nil +func funcAbs(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Abs), nil } // === ceil(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcCeil(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Ceil), nil +func funcCeil(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Ceil), nil } // === floor(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcFloor(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Floor), nil +func funcFloor(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Floor), nil } // === exp(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcExp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Exp), nil +func funcExp(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Exp), nil } // === sqrt(Vector VectorNode) 
(Vector, Annotations) === -func funcSqrt(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Sqrt), nil +func funcSqrt(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Sqrt), nil } // === ln(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcLn(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Log), nil +func funcLn(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Log), nil } // === log2(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcLog2(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Log2), nil +func funcLog2(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Log2), nil } // === log10(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcLog10(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Log10), nil +func funcLog10(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Log10), nil } // === sin(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcSin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Sin), nil +func funcSin(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Sin), nil } // === cos(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcCos(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Cos), nil +func funcCos(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Cos), nil } // === tan(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcTan(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Tan), nil +func funcTan(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Tan), nil } // === asin(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAsin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Asin), nil +func funcAsin(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Asin), nil } // === acos(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAcos(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Acos), nil +func 
funcAcos(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Acos), nil } // === atan(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAtan(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Atan), nil +func funcAtan(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Atan), nil } // === sinh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcSinh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Sinh), nil +func funcSinh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Sinh), nil } // === cosh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcCosh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Cosh), nil +func funcCosh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Cosh), nil } // === tanh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcTanh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Tanh), nil +func funcTanh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Tanh), nil } // === asinh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAsinh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Asinh), nil +func funcAsinh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Asinh), nil } // === acosh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAcosh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Acosh), nil +func funcAcosh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Acosh), nil } // === atanh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAtanh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, math.Atanh), nil +func funcAtanh(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, math.Atanh), nil } // === rad(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcRad(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, func(v float64) float64 { +func funcRad(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, 
func(v float64) float64 { return v * math.Pi / 180 }), nil } // === deg(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcDeg(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, func(v float64) float64 { +func funcDeg(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, func(v float64) float64 { return v * 180 / math.Pi }), nil } // === pi() Scalar === -func funcPi(_ []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcPi([]Vector, Matrix, parser.Expressions, *EvalNodeHelper) (Vector, annotations.Annotations) { return Vector{Sample{F: math.Pi}}, nil } // === sgn(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcSgn(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFloatFunc(vals, enh, func(v float64) float64 { +func funcSgn(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFloatFunc(vectorVals, enh, func(v float64) float64 { switch { case v < 0: return -1 @@ -1173,8 +1173,8 @@ func funcSgn(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Ve } // === timestamp(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcTimestamp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec := vals[0].(Vector) +func funcTimestamp(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec := vectorVals[0] for _, el := range vec { if !enh.enableDelayedNameRemoval { el.Metric = el.Metric.DropReserved(schema.IsMetadataLabel) @@ -1188,6 +1188,9 @@ func funcTimestamp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelpe return enh.Out, nil } +// We get incorrect results if this function is inlined; see https://github.com/prometheus/prometheus/issues/16714. +// +//go:noinline func kahanSumInc(inc, sum, c float64) (newSum, newC float64) { t := sum + inc switch { @@ -1250,8 +1253,8 @@ func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept f } // === deriv(node parser.ValueTypeMatrix) (Vector, Annotations) === -func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - samples := vals[0].(Matrix)[0] +func funcDeriv(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + samples := matrixVal[0] metricName := samples.Metric.Get(labels.MetricName) // No sense in trying to compute a derivative without at least two float points. 
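One hunk above deserves a comment: kahanSumInc gains a //go:noinline directive because compensated summation relies on each intermediate rounding happening exactly as written, and the linked issue (prometheus/prometheus#16714) reports incorrect results once the compiler inlined the helper. The following self-contained sketch shows the compensated-sum technique as a textbook Neumaier-style variant; the vendored body may handle extra cases (for example infinities) that this hunk does not show.

package main

import (
	"fmt"
	"math"
)

// Neumaier-style compensated summation: c accumulates the low-order bits that
// the running sum would otherwise lose to float64 rounding. Kept out of line
// so the caller's optimizations cannot disturb the rounding order.
//
//go:noinline
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
	t := sum + inc
	if math.Abs(sum) >= math.Abs(inc) {
		c += (sum - t) + inc
	} else {
		c += (inc - t) + sum
	}
	return t, c
}

func main() {
	var sum, c float64
	for i := 0; i < 10; i++ {
		sum, c = kahanSumInc(0.1, sum, c)
	}
	fmt.Println(sum)     // 0.9999999999999999 (plain accumulation drifts)
	fmt.Println(sum + c) // 1 (folding the compensation back recovers it)
}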
@@ -1275,9 +1278,9 @@ func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper } // === predict_linear(node parser.ValueTypeMatrix, k parser.ValueTypeScalar) (Vector, Annotations) === -func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - samples := vals[0].(Matrix)[0] - duration := vals[1].(Vector)[0].F +func funcPredictLinear(vectorVals []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + samples := matrixVal[0] + duration := vectorVals[0][0].F metricName := samples.Metric.Get(labels.MetricName) // No sense in trying to predict anything without at least two float points. @@ -1297,8 +1300,8 @@ func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNo return append(enh.Out, Sample{F: slope*duration + intercept}), nil } -func simpleHistogramFunc(vals []parser.Value, enh *EvalNodeHelper, f func(h *histogram.FloatHistogram) float64) Vector { - for _, el := range vals[0].(Vector) { +func simpleHistogramFunc(vectorVals []Vector, enh *EvalNodeHelper, f func(h *histogram.FloatHistogram) float64) Vector { + for _, el := range vectorVals[0] { if el.H != nil { // Process only histogram samples. if !enh.enableDelayedNameRemoval { el.Metric = el.Metric.DropMetricName() @@ -1314,28 +1317,28 @@ func simpleHistogramFunc(vals []parser.Value, enh *EvalNodeHelper, f func(h *his } // === histogram_count(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramCount(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 { +func funcHistogramCount(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleHistogramFunc(vectorVals, enh, func(h *histogram.FloatHistogram) float64 { return h.Count }), nil } // === histogram_sum(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramSum(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 { +func funcHistogramSum(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleHistogramFunc(vectorVals, enh, func(h *histogram.FloatHistogram) float64 { return h.Sum }), nil } // === histogram_avg(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramAvg(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 { +func funcHistogramAvg(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleHistogramFunc(vectorVals, enh, func(h *histogram.FloatHistogram) float64 { return h.Sum / h.Count }), nil } -func histogramVariance(vals []parser.Value, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) { - return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 { +func histogramVariance(vectorVals []Vector, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) { + return simpleHistogramFunc(vectorVals, enh, func(h *histogram.FloatHistogram) float64 { mean := h.Sum / 
h.Count var variance, cVariance float64 it := h.AllBucketIterator() @@ -1372,20 +1375,20 @@ func histogramVariance(vals []parser.Value, enh *EvalNodeHelper, varianceToResul } // === histogram_stddev(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramStdDev(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return histogramVariance(vals, enh, math.Sqrt) +func funcHistogramStdDev(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return histogramVariance(vectorVals, enh, math.Sqrt) } // === histogram_stdvar(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramStdVar(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return histogramVariance(vals, enh, nil) +func funcHistogramStdVar(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return histogramVariance(vectorVals, enh, nil) } // === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - lower := vals[0].(Vector)[0].F - upper := vals[1].(Vector)[0].F - inVec := vals[2].(Vector) +func funcHistogramFraction(vectorVals []Vector, _ Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + lower := vectorVals[0][0].F + upper := vectorVals[1][0].F + inVec := vectorVals[2] annos := enh.resetHistograms(inVec, args[2]) @@ -1427,9 +1430,9 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev } // === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - q := vals[0].(Vector)[0].F - inVec := vals[1].(Vector) +func funcHistogramQuantile(vectorVals []Vector, _ Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + q := vectorVals[0][0].F + inVec := vectorVals[1] var annos annotations.Annotations if math.IsNaN(q) || q < 0 || q > 1 { @@ -1479,9 +1482,9 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev } // === resets(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcResets(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - floats := vals[0].(Matrix)[0].Floats - histograms := vals[0].(Matrix)[0].Histograms +func funcResets(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + floats := matrixVal[0].Floats + histograms := matrixVal[0].Histograms resets := 0 if len(floats) == 0 && len(histograms) == 0 { return enh.Out, nil @@ -1524,9 +1527,9 @@ func funcResets(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) } // === changes(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcChanges(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - floats := vals[0].(Matrix)[0].Floats - histograms := vals[0].(Matrix)[0].Histograms +func funcChanges(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + floats := matrixVal[0].Floats + histograms := 
matrixVal[0].Histograms changes := 0 if len(floats) == 0 && len(histograms) == 0 { return enh.Out, nil @@ -1612,11 +1615,11 @@ func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressio } // === Vector(s Scalar) (Vector, Annotations) === -func funcVector(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcVector(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{ Metric: labels.Labels{}, - F: vals[0].(Vector)[0].F, + F: vectorVals[0][0].F, }), nil } @@ -1666,8 +1669,8 @@ func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions) } // Common code for date related functions. -func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) float64) Vector { - if len(vals) == 0 { +func dateWrapper(vectorVals []Vector, enh *EvalNodeHelper, f func(time.Time) float64) Vector { + if len(vectorVals) == 0 { return append(enh.Out, Sample{ Metric: labels.Labels{}, @@ -1675,7 +1678,7 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo }) } - for _, el := range vals[0].(Vector) { + for _, el := range vectorVals[0] { if el.H != nil { // Ignore histogram sample. continue @@ -1694,57 +1697,57 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo } // === days_in_month(v Vector) Scalar === -func funcDaysInMonth(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return dateWrapper(vals, enh, func(t time.Time) float64 { +func funcDaysInMonth(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vectorVals, enh, func(t time.Time) float64 { return float64(32 - time.Date(t.Year(), t.Month(), 32, 0, 0, 0, 0, time.UTC).Day()) }), nil } // === day_of_month(v Vector) Scalar === -func funcDayOfMonth(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return dateWrapper(vals, enh, func(t time.Time) float64 { +func funcDayOfMonth(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vectorVals, enh, func(t time.Time) float64 { return float64(t.Day()) }), nil } // === day_of_week(v Vector) Scalar === -func funcDayOfWeek(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return dateWrapper(vals, enh, func(t time.Time) float64 { +func funcDayOfWeek(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vectorVals, enh, func(t time.Time) float64 { return float64(t.Weekday()) }), nil } // === day_of_year(v Vector) Scalar === -func funcDayOfYear(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return dateWrapper(vals, enh, func(t time.Time) float64 { +func funcDayOfYear(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vectorVals, enh, func(t time.Time) float64 { return float64(t.YearDay()) }), nil } // === hour(v Vector) Scalar === -func funcHour(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return dateWrapper(vals, enh, func(t time.Time) float64 { +func funcHour(vectorVals []Vector, _ Matrix, _ 
parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vectorVals, enh, func(t time.Time) float64 { return float64(t.Hour()) }), nil } // === minute(v Vector) Scalar === -func funcMinute(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return dateWrapper(vals, enh, func(t time.Time) float64 { +func funcMinute(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vectorVals, enh, func(t time.Time) float64 { return float64(t.Minute()) }), nil } // === month(v Vector) Scalar === -func funcMonth(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return dateWrapper(vals, enh, func(t time.Time) float64 { +func funcMonth(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vectorVals, enh, func(t time.Time) float64 { return float64(t.Month()) }), nil } // === year(v Vector) Scalar === -func funcYear(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return dateWrapper(vals, enh, func(t time.Time) float64 { +func funcYear(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return dateWrapper(vectorVals, enh, func(t time.Time) float64 { return float64(t.Year()) }), nil } diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go index dc3e36b5b58..ef9b33d6f1a 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go @@ -243,15 +243,15 @@ func (TestStmt) PositionRange() posrange.PositionRange { End: -1, } } -func (e *AggregateExpr) Type() ValueType { return ValueTypeVector } -func (e *Call) Type() ValueType { return e.Func.ReturnType } -func (e *MatrixSelector) Type() ValueType { return ValueTypeMatrix } -func (e *SubqueryExpr) Type() ValueType { return ValueTypeMatrix } -func (e *NumberLiteral) Type() ValueType { return ValueTypeScalar } -func (e *ParenExpr) Type() ValueType { return e.Expr.Type() } -func (e *StringLiteral) Type() ValueType { return ValueTypeString } -func (e *UnaryExpr) Type() ValueType { return e.Expr.Type() } -func (e *VectorSelector) Type() ValueType { return ValueTypeVector } +func (*AggregateExpr) Type() ValueType { return ValueTypeVector } +func (e *Call) Type() ValueType { return e.Func.ReturnType } +func (*MatrixSelector) Type() ValueType { return ValueTypeMatrix } +func (*SubqueryExpr) Type() ValueType { return ValueTypeMatrix } +func (*NumberLiteral) Type() ValueType { return ValueTypeScalar } +func (e *ParenExpr) Type() ValueType { return e.Expr.Type() } +func (*StringLiteral) Type() ValueType { return ValueTypeString } +func (e *UnaryExpr) Type() ValueType { return e.Expr.Type() } +func (*VectorSelector) Type() ValueType { return ValueTypeVector } func (e *BinaryExpr) Type() ValueType { if e.LHS.Type() == ValueTypeScalar && e.RHS.Type() == ValueTypeScalar { return ValueTypeScalar @@ -259,7 +259,7 @@ func (e *BinaryExpr) Type() ValueType { return ValueTypeVector } func (e *StepInvariantExpr) Type() ValueType { return e.Expr.Type() } -func (e *DurationExpr) Type() ValueType { return ValueTypeScalar } +func (*DurationExpr) Type() ValueType { return ValueTypeScalar } func (*AggregateExpr) PromQLExpr() {} func 
(*BinaryExpr) PromQLExpr() {} diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y index e7e16cd0330..474eb74d1a6 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y @@ -244,23 +244,9 @@ expr : */ aggregate_expr : aggregate_op aggregate_modifier function_call_body - { - // Need to consume the position of the first RIGHT_PAREN. It might not exist on garbage input - // like 'sum (some_metric) by test' - if len(yylex.(*parser).closingParens) > 1 { - yylex.(*parser).closingParens = yylex.(*parser).closingParens[1:] - } - $$ = yylex.(*parser).newAggregateExpr($1, $2, $3) - } + { $$ = yylex.(*parser).newAggregateExpr($1, $2, $3) } | aggregate_op function_call_body aggregate_modifier - { - // Need to consume the position of the first RIGHT_PAREN. It might not exist on garbage input - // like 'sum by test (some_metric)' - if len(yylex.(*parser).closingParens) > 1 { - yylex.(*parser).closingParens = yylex.(*parser).closingParens[1:] - } - $$ = yylex.(*parser).newAggregateExpr($1, $3, $2) - } + { $$ = yylex.(*parser).newAggregateExpr($1, $3, $2) } | aggregate_op function_call_body { $$ = yylex.(*parser).newAggregateExpr($1, &AggregateExpr{}, $2) } | aggregate_op error @@ -414,10 +400,9 @@ function_call : IDENTIFIER function_call_body Args: $2.(Expressions), PosRange: posrange.PositionRange{ Start: $1.Pos, - End: yylex.(*parser).closingParens[0], + End: yylex.(*parser).lastClosing, }, } - yylex.(*parser).closingParens = yylex.(*parser).closingParens[1:] } ; @@ -443,10 +428,7 @@ function_call_args: function_call_args COMMA expr */ paren_expr : LEFT_PAREN expr RIGHT_PAREN - { - $$ = &ParenExpr{Expr: $2.(Expr), PosRange: mergeRanges(&$1, &$3)} - yylex.(*parser).closingParens = yylex.(*parser).closingParens[1:] - } + { $$ = &ParenExpr{Expr: $2.(Expr), PosRange: mergeRanges(&$1, &$3)} } ; /* diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go index e93d1b3de6b..7037245a64c 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go @@ -1122,21 +1122,11 @@ yydefault: case 21: yyDollar = yyS[yypt-3 : yypt+1] { - // Need to consume the position of the first RIGHT_PAREN. It might not exist on garbage input - // like 'sum (some_metric) by test' - if len(yylex.(*parser).closingParens) > 1 { - yylex.(*parser).closingParens = yylex.(*parser).closingParens[1:] - } yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, yyDollar[2].node, yyDollar[3].node) } case 22: yyDollar = yyS[yypt-3 : yypt+1] { - // Need to consume the position of the first RIGHT_PAREN. 
It might not exist on garbage input - // like 'sum by test (some_metric)' - if len(yylex.(*parser).closingParens) > 1 { - yylex.(*parser).closingParens = yylex.(*parser).closingParens[1:] - } yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, yyDollar[3].node, yyDollar[2].node) } case 23: @@ -1364,10 +1354,9 @@ yydefault: Args: yyDollar[2].node.(Expressions), PosRange: posrange.PositionRange{ Start: yyDollar[1].item.Pos, - End: yylex.(*parser).closingParens[0], + End: yylex.(*parser).lastClosing, }, } - yylex.(*parser).closingParens = yylex.(*parser).closingParens[1:] } case 63: yyDollar = yyS[yypt-3 : yypt+1] @@ -1399,7 +1388,6 @@ yydefault: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = &ParenExpr{Expr: yyDollar[2].node.(Expr), PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item)} - yylex.(*parser).closingParens = yylex.(*parser).closingParens[1:] } case 69: yyDollar = yyS[yypt-1 : yypt+1] diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go index e99f5f4570d..013e3f321b9 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go @@ -59,13 +59,6 @@ type parser struct { // Every time an Item is lexed that could be the end // of certain expressions its end position is stored here. lastClosing posrange.Pos - // Keep track of closing parentheses in addition, because sometimes the - // parser needs to read past a closing parenthesis to find the end of an - // expression, e.g. reading ony '(sum(foo)' cannot tell the end of the - // aggregation expression, since it could continue with either - // '(sum(foo))' or '(sum(foo) by (bar))' by which time we set lastClosing - // to the last paren. - closingParens []posrange.Pos yyParser yyParserImpl @@ -89,7 +82,7 @@ func NewParser(input string, opts ...Opt) *parser { //nolint:revive // unexporte p.injecting = false p.parseErrors = nil p.generatedParserResult = nil - p.closingParens = make([]posrange.Pos, 0) + p.lastClosing = posrange.Pos(0) // Clear lexer struct before reusing. p.lex = Lexer{ @@ -179,11 +172,6 @@ func EnrichParseError(err error, enrich func(parseErr *ParseErr)) { func ParseExpr(input string) (expr Expr, err error) { p := NewParser(input) defer p.Close() - - if len(p.closingParens) > 0 { - return nil, fmt.Errorf("internal parser error, not all closing parens consumed: %v", p.closingParens) - } - return p.ParseExpr() } @@ -334,7 +322,7 @@ func (p *parser) unexpected(context, expected string) { var errUnexpected = errors.New("unexpected error") // recover is the handler that turns panics into returns from the top level of Parse. -func (p *parser) recover(errp *error) { +func (*parser) recover(errp *error) { e := recover() switch _, ok := e.(runtime.Error); { case ok: @@ -387,10 +375,7 @@ func (p *parser) Lex(lval *yySymType) int { case EOF: lval.item.Typ = EOF p.InjectItem(0) - case RIGHT_PAREN: - p.closingParens = append(p.closingParens, lval.item.Pos+posrange.Pos(len(lval.item.Val))) - fallthrough - case RIGHT_BRACE, RIGHT_BRACKET, DURATION, NUMBER: + case RIGHT_BRACE, RIGHT_PAREN, RIGHT_BRACKET, DURATION, NUMBER: p.lastClosing = lval.item.Pos + posrange.Pos(len(lval.item.Val)) } @@ -402,7 +387,7 @@ func (p *parser) Lex(lval *yySymType) int { // It is a no-op since the parser's error routines are triggered // by mechanisms that allow more fine-grained control // For more information, see https://pkg.go.dev/golang.org/x/tools/cmd/goyacc.
-func (p *parser) Error(string) { +func (*parser) Error(string) { } // InjectItem allows injecting a single Item at the beginning of the token stream @@ -425,7 +410,7 @@ func (p *parser) InjectItem(typ ItemType) { p.injecting = true } -func (p *parser) newBinaryExpression(lhs Node, op Item, modifiers, rhs Node) *BinaryExpr { +func (*parser) newBinaryExpression(lhs Node, op Item, modifiers, rhs Node) *BinaryExpr { ret := modifiers.(*BinaryExpr) ret.LHS = lhs.(Expr) @@ -435,7 +420,7 @@ func (p *parser) newBinaryExpression(lhs Node, op Item, modifiers, rhs Node) *Bi return ret } -func (p *parser) assembleVectorSelector(vs *VectorSelector) { +func (*parser) assembleVectorSelector(vs *VectorSelector) { // If the metric name was set outside the braces, add a matcher for it. // If the metric name was inside the braces we don't need to do anything. if vs.Name != "" { @@ -451,16 +436,10 @@ func (p *parser) newAggregateExpr(op Item, modifier, args Node) (ret *AggregateE ret = modifier.(*AggregateExpr) arguments := args.(Expressions) - if len(p.closingParens) == 0 { - // Prevents invalid array accesses. - // The error is already captured by the parser. - return - } ret.PosRange = posrange.PositionRange{ Start: op.Pos, - End: p.closingParens[0], + End: p.lastClosing, } - p.closingParens = p.closingParens[1:] ret.Op = op.Typ @@ -493,7 +472,7 @@ func (p *parser) newAggregateExpr(op Item, modifier, args Node) (ret *AggregateE } // newMap is used when building the FloatHistogram from a map. -func (p *parser) newMap() (ret map[string]interface{}) { +func (*parser) newMap() (ret map[string]interface{}) { return map[string]interface{}{} } @@ -522,7 +501,7 @@ func (p *parser) histogramsDecreaseSeries(base, inc *histogram.FloatHistogram, t }) } -func (p *parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uint64, +func (*parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uint64, combine func(*histogram.FloatHistogram, *histogram.FloatHistogram) (*histogram.FloatHistogram, error), ) ([]SequenceValue, error) { ret := make([]SequenceValue, times+1) diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/prettier.go b/vendor/github.com/prometheus/prometheus/promql/parser/prettier.go index eefa3f490be..54726be4fe7 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/prettier.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/prettier.go @@ -105,7 +105,7 @@ func (e *Call) Pretty(level int) string { return s } -func (e *EvalStmt) Pretty(_ int) string { +func (e *EvalStmt) Pretty(int) string { return "EVAL " + e.Expr.String() } diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go b/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go index 1754f6635d1..654a94db35b 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go @@ -314,7 +314,7 @@ func validateExpectedCmds(cmd *evalCmd) error { return nil } -func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) { +func (*test) parseEval(lines []string, i int) (int, *evalCmd, error) { instantParts := patEvalInstant.FindStringSubmatch(lines[i]) rangeParts := patEvalRange.FindStringSubmatch(lines[i]) @@ -532,7 +532,7 @@ func newLoadCmd(gap time.Duration, withNHCB bool) *loadCmd { } } -func (cmd loadCmd) String() string { +func (loadCmd) String() string { return "load" } @@ -795,7 +795,7 @@ func newRangeEvalCmd(expr string, start, end time.Time, step 
time.Duration, line } } -func (ev *evalCmd) String() string { +func (*evalCmd) String() string { return "eval" } @@ -1195,7 +1195,7 @@ func HistogramTestExpression(h *histogram.FloatHistogram) string { // clearCmd is a command that wipes the test's storage state. type clearCmd struct{} -func (cmd clearCmd) String() string { +func (clearCmd) String() string { return "clear" } diff --git a/vendor/github.com/prometheus/prometheus/promql/value.go b/vendor/github.com/prometheus/prometheus/promql/value.go index 2e387117e51..c2db0833ee0 100644 --- a/vendor/github.com/prometheus/prometheus/promql/value.go +++ b/vendor/github.com/prometheus/prometheus/promql/value.go @@ -471,7 +471,7 @@ func (ssi *storageSeriesIterator) At() (t int64, v float64) { return ssi.currT, ssi.currF } -func (ssi *storageSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) { +func (*storageSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) { panic(errors.New("storageSeriesIterator: AtHistogram not supported")) } @@ -535,7 +535,7 @@ func (ssi *storageSeriesIterator) Next() chunkenc.ValueType { } } -func (ssi *storageSeriesIterator) Err() error { +func (*storageSeriesIterator) Err() error { return nil } diff --git a/vendor/github.com/prometheus/prometheus/rules/manager.go b/vendor/github.com/prometheus/prometheus/rules/manager.go index 7cbe3ce15ab..ecebc8de546 100644 --- a/vendor/github.com/prometheus/prometheus/rules/manager.go +++ b/vendor/github.com/prometheus/prometheus/rules/manager.go @@ -465,7 +465,7 @@ type RuleDependencyController interface { type ruleDependencyController struct{} // AnalyseRules implements RuleDependencyController. -func (c ruleDependencyController) AnalyseRules(rules []Rule) { +func (ruleDependencyController) AnalyseRules(rules []Rule) { depMap := buildDependencyMap(rules) if depMap == nil { @@ -509,11 +509,11 @@ func newRuleConcurrencyController(maxConcurrency int64) RuleConcurrencyControlle } } -func (c *concurrentRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule) bool { +func (c *concurrentRuleEvalController) Allow(context.Context, *Group, Rule) bool { return c.sema.TryAcquire(1) } -func (c *concurrentRuleEvalController) SplitGroupIntoBatches(_ context.Context, g *Group) []ConcurrentRules { +func (*concurrentRuleEvalController) SplitGroupIntoBatches(_ context.Context, g *Group) []ConcurrentRules { // Using the rule dependency controller information (rules being identified as having no dependencies or no dependants), // we can safely run the following concurrent groups: // 1. Concurrently, all rules that have no dependencies @@ -549,7 +549,7 @@ func (c *concurrentRuleEvalController) SplitGroupIntoBatches(_ context.Context, return order } -func (c *concurrentRuleEvalController) Done(_ context.Context) { +func (c *concurrentRuleEvalController) Done(context.Context) { c.sema.Release(1) } @@ -558,15 +558,15 @@ var _ RuleConcurrencyController = &sequentialRuleEvalController{} // sequentialRuleEvalController is a RuleConcurrencyController that runs every rule sequentially. 
type sequentialRuleEvalController struct{} -func (c sequentialRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule) bool { +func (sequentialRuleEvalController) Allow(context.Context, *Group, Rule) bool { return false } -func (c sequentialRuleEvalController) SplitGroupIntoBatches(_ context.Context, _ *Group) []ConcurrentRules { +func (sequentialRuleEvalController) SplitGroupIntoBatches(context.Context, *Group) []ConcurrentRules { return nil } -func (c sequentialRuleEvalController) Done(_ context.Context) {} +func (sequentialRuleEvalController) Done(context.Context) {} // FromMaps returns new sorted Labels from the given maps, overriding each other in order. func FromMaps(maps ...map[string]string) labels.Labels { diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index b4f34a6f5b3..75fb6105e1d 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -474,7 +474,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { for _, t := range targets { // Replicate .Labels().IsEmpty() with a loop here to avoid generating garbage. nonEmpty := false - t.LabelsRange(func(_ labels.Label) { nonEmpty = true }) + t.LabelsRange(func(labels.Label) { nonEmpty = true }) switch { case nonEmpty: all = append(all, t) @@ -1116,7 +1116,7 @@ func (c *scrapeCache) setType(mfName []byte, t model.MetricType) ([]byte, *metaE c.metaMtx.Lock() defer c.metaMtx.Unlock() - e, ok := c.metadata[yoloString(mfName)] + e, ok := c.metadata[string(mfName)] if !ok { e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}} c.metadata[string(mfName)] = e @@ -1133,7 +1133,7 @@ func (c *scrapeCache) setHelp(mfName, help []byte) ([]byte, *metaEntry) { c.metaMtx.Lock() defer c.metaMtx.Unlock() - e, ok := c.metadata[yoloString(mfName)] + e, ok := c.metadata[string(mfName)] if !ok { e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}} c.metadata[string(mfName)] = e @@ -1150,7 +1150,7 @@ func (c *scrapeCache) setUnit(mfName, unit []byte) ([]byte, *metaEntry) { c.metaMtx.Lock() defer c.metaMtx.Unlock() - e, ok := c.metadata[yoloString(mfName)] + e, ok := c.metadata[string(mfName)] if !ok { e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}} c.metadata[string(mfName)] = e diff --git a/vendor/github.com/prometheus/prometheus/storage/buffer.go b/vendor/github.com/prometheus/prometheus/storage/buffer.go index e847c10e61a..bc27948fd07 100644 --- a/vendor/github.com/prometheus/prometheus/storage/buffer.go +++ b/vendor/github.com/prometheus/prometheus/storage/buffer.go @@ -175,15 +175,15 @@ func (s fSample) F() float64 { return s.f } -func (s fSample) H() *histogram.Histogram { +func (fSample) H() *histogram.Histogram { panic("H() called for fSample") } -func (s fSample) FH() *histogram.FloatHistogram { +func (fSample) FH() *histogram.FloatHistogram { panic("FH() called for fSample") } -func (s fSample) Type() chunkenc.ValueType { +func (fSample) Type() chunkenc.ValueType { return chunkenc.ValFloat } @@ -200,7 +200,7 @@ func (s hSample) T() int64 { return s.t } -func (s hSample) F() float64 { +func (hSample) F() float64 { panic("F() called for hSample") } @@ -212,7 +212,7 @@ func (s hSample) FH() *histogram.FloatHistogram { return s.h.ToFloat(nil) } -func (s hSample) Type() chunkenc.ValueType { +func (hSample) Type() chunkenc.ValueType { return chunkenc.ValHistogram } @@ -229,11 +229,11 @@ func (s fhSample) T() int64 { 
return s.t } -func (s fhSample) F() float64 { +func (fhSample) F() float64 { panic("F() called for fhSample") } -func (s fhSample) H() *histogram.Histogram { +func (fhSample) H() *histogram.Histogram { panic("H() called for fhSample") } @@ -241,7 +241,7 @@ func (s fhSample) FH() *histogram.FloatHistogram { return s.fh } -func (s fhSample) Type() chunkenc.ValueType { +func (fhSample) Type() chunkenc.ValueType { return chunkenc.ValFloatHistogram } diff --git a/vendor/github.com/prometheus/prometheus/storage/interface.go b/vendor/github.com/prometheus/prometheus/storage/interface.go index 636473d07c7..5684460db0c 100644 --- a/vendor/github.com/prometheus/prometheus/storage/interface.go +++ b/vendor/github.com/prometheus/prometheus/storage/interface.go @@ -125,15 +125,15 @@ type MockQuerier struct { SelectMockFunction func(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet } -func (q *MockQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (*MockQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } -func (q *MockQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (*MockQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } -func (q *MockQuerier) Close() error { +func (*MockQuerier) Close() error { return nil } @@ -408,10 +408,10 @@ type testSeriesSet struct { series Series } -func (s testSeriesSet) Next() bool { return true } -func (s testSeriesSet) At() Series { return s.series } -func (s testSeriesSet) Err() error { return nil } -func (s testSeriesSet) Warnings() annotations.Annotations { return nil } +func (testSeriesSet) Next() bool { return true } +func (s testSeriesSet) At() Series { return s.series } +func (testSeriesSet) Err() error { return nil } +func (testSeriesSet) Warnings() annotations.Annotations { return nil } // TestSeriesSet returns a mock series set. func TestSeriesSet(series Series) SeriesSet { @@ -422,10 +422,10 @@ type errSeriesSet struct { err error } -func (s errSeriesSet) Next() bool { return false } -func (s errSeriesSet) At() Series { return nil } -func (s errSeriesSet) Err() error { return s.err } -func (s errSeriesSet) Warnings() annotations.Annotations { return nil } +func (errSeriesSet) Next() bool { return false } +func (errSeriesSet) At() Series { return nil } +func (s errSeriesSet) Err() error { return s.err } +func (errSeriesSet) Warnings() annotations.Annotations { return nil } // ErrSeriesSet returns a series set that wraps an error. func ErrSeriesSet(err error) SeriesSet { @@ -443,10 +443,10 @@ type errChunkSeriesSet struct { err error } -func (s errChunkSeriesSet) Next() bool { return false } -func (s errChunkSeriesSet) At() ChunkSeries { return nil } -func (s errChunkSeriesSet) Err() error { return s.err } -func (s errChunkSeriesSet) Warnings() annotations.Annotations { return nil } +func (errChunkSeriesSet) Next() bool { return false } +func (errChunkSeriesSet) At() ChunkSeries { return nil } +func (s errChunkSeriesSet) Err() error { return s.err } +func (errChunkSeriesSet) Warnings() annotations.Annotations { return nil } // ErrChunkSeriesSet returns a chunk series set that wraps an error. 
func ErrChunkSeriesSet(err error) ChunkSeriesSet { diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/client.go b/vendor/github.com/prometheus/prometheus/storage/remote/client.go index 68891f659e6..52404566f23 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/client.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/client.go @@ -418,7 +418,7 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query, sortSeries bool) } } -func (c *Client) handleSampledResponse(req *prompb.ReadRequest, httpResp *http.Response, sortSeries bool) (storage.SeriesSet, error) { +func (*Client) handleSampledResponse(req *prompb.ReadRequest, httpResp *http.Response, sortSeries bool) (storage.SeriesSet, error) { compressed, err := io.ReadAll(httpResp.Body) if err != nil { return nil, fmt.Errorf("error reading response. HTTP status code: %s: %w", httpResp.Status, err) diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go index 3dbf432bcfd..a76662ca091 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go @@ -340,7 +340,7 @@ func (e errSeriesSet) Err() error { return e.err } -func (e errSeriesSet) Warnings() annotations.Annotations { return nil } +func (errSeriesSet) Warnings() annotations.Annotations { return nil } // concreteSeriesSet implements storage.SeriesSet. type concreteSeriesSet struct { @@ -357,11 +357,11 @@ func (c *concreteSeriesSet) At() storage.Series { return c.series[c.cur-1] } -func (c *concreteSeriesSet) Err() error { +func (*concreteSeriesSet) Err() error { return nil } -func (c *concreteSeriesSet) Warnings() annotations.Annotations { return nil } +func (*concreteSeriesSet) Warnings() annotations.Annotations { return nil } // concreteSeries implements storage.Series. type concreteSeries struct { @@ -536,7 +536,7 @@ func (c *concreteSeriesIterator) Next() chunkenc.ValueType { } // Err implements chunkenc.Iterator. 
-func (c *concreteSeriesIterator) Err() error { +func (*concreteSeriesIterator) Err() error { return nil } @@ -607,7 +607,7 @@ func (s *chunkedSeriesSet) Err() error { return s.err } -func (s *chunkedSeriesSet) Warnings() annotations.Annotations { +func (*chunkedSeriesSet) Warnings() annotations.Annotations { return nil } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go b/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go index d7f376c96a8..b1f98038fc0 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go @@ -37,7 +37,7 @@ type Watchable interface { type noopScrapeManager struct{} -func (noop *noopScrapeManager) Get() (*scrape.Manager, error) { +func (*noopScrapeManager) Get() (*scrape.Manager, error) { return nil, errors.New("scrape manager not ready") } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go index ae6bb63dfbb..b7445b5d679 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -25,6 +25,7 @@ import ( "slices" "sort" "strconv" + "strings" "time" "unicode/utf8" @@ -41,13 +42,12 @@ import ( ) const ( - sumStr = "_sum" - countStr = "_count" - bucketStr = "_bucket" - leStr = "le" - quantileStr = "quantile" - pInfStr = "+Inf" - createdSuffix = "_created" + sumStr = "_sum" + countStr = "_count" + bucketStr = "_bucket" + leStr = "le" + quantileStr = "quantile" + pInfStr = "+Inf" // maxExemplarRunes is the maximum number of UTF-8 exemplar characters // according to the prometheus specification // https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#exemplars @@ -118,8 +118,8 @@ var seps = []byte{'\xff'} // if logOnOverwrite is true, the overwrite is logged. Resulting label names are sanitized. // If settings.PromoteResourceAttributes is not empty, it's a set of resource attributes that should be promoted to labels. func createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope scope, settings Settings, - ignoreAttrs []string, logOnOverwrite bool, extras ...string, -) []prompb.Label { + ignoreAttrs []string, logOnOverwrite bool, metadata prompb.MetricMetadata, extras ...string, +) ([]prompb.Label, error) { resourceAttrs := resource.Attributes() serviceName, haveServiceName := resourceAttrs.Get(conventions.AttributeServiceName) instance, haveInstanceID := resourceAttrs.Get(conventions.AttributeServiceInstanceID) @@ -142,6 +142,9 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope s if haveInstanceID { maxLabelCount++ } + if settings.EnableTypeAndUnitLabels { + maxLabelCount += 2 + } // Ensure attributes are sorted by key for consistent merging of keys which // collide when sanitized. 
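[Reviewer note] The helper.go hunks around here follow from otlptranslator.LabelNamer.Build changing from returning a plain string to returning (string, error), so createAttributes must now propagate naming failures to its callers. Because pcommon.Map.Range takes a callback that cannot return an error, the new code captures the first failure in a closure variable and aborts the walk. A minimal, self-contained sketch of that pattern, for reviewers; the namer and rangeFn stand-ins below are hypothetical and only mirror the shape of the vendored code:

package main

import "fmt"

// namer stands in for otlptranslator.LabelNamer: Build can now fail.
type namer struct{}

func (namer) Build(name string) (string, error) {
	if name == "" {
		return "", fmt.Errorf("empty label name")
	}
	return name, nil
}

// rangeFn stands in for pcommon.Map.Range: it walks key/value pairs and
// stops early when the callback returns false.
func rangeFn(attrs map[string]string, f func(k, v string) bool) {
	for k, v := range attrs {
		if !f(k, v) {
			return
		}
	}
}

func buildScopeLabels(attrs map[string]string) (map[string]string, error) {
	var n namer
	l := map[string]string{}
	// Range cannot return an error directly, so capture the first failure
	// in a closure variable and abort the iteration by returning false.
	var rangeErr error
	rangeFn(attrs, func(k, v string) bool {
		name, err := n.Build("otel_scope_" + k)
		if err != nil {
			rangeErr = err
			return false
		}
		l[name] = v
		return true
	})
	if rangeErr != nil {
		return nil, rangeErr
	}
	return l, nil
}

func main() {
	labels, err := buildScopeLabels(map[string]string{"version": "1.2.3"})
	fmt.Println(labels, err) // map[otel_scope_version:1.2.3] <nil>
}

Returning false from the callback is what actually stops the Range walk; the captured rangeErr is then surfaced to the caller, exactly as in the promoteScope block of the hunk below.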
@@ -160,7 +163,10 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope s l := make(map[string]string, maxLabelCount) labelNamer := otlptranslator.LabelNamer{UTF8Allowed: settings.AllowUTF8} for _, label := range labels { - finalKey := labelNamer.Build(label.Name) + finalKey, err := labelNamer.Build(label.Name) + if err != nil { + return nil, err + } if existingValue, alreadyExists := l[finalKey]; alreadyExists { l[finalKey] = existingValue + ";" + label.Value } else { @@ -169,23 +175,44 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope s } for _, lbl := range promotedAttrs { - normalized := labelNamer.Build(lbl.Name) + normalized, err := labelNamer.Build(lbl.Name) + if err != nil { + return nil, err + } if _, exists := l[normalized]; !exists { l[normalized] = lbl.Value } } if promoteScope { + var rangeErr error scope.attributes.Range(func(k string, v pcommon.Value) bool { - name := labelNamer.Build("otel_scope_" + k) + name, err := labelNamer.Build("otel_scope_" + k) + if err != nil { + rangeErr = err + return false + } l[name] = v.AsString() return true }) + if rangeErr != nil { + return nil, rangeErr + } // Scope Name, Version and Schema URL are added after attributes to ensure they are not overwritten by attributes. l["otel_scope_name"] = scope.name l["otel_scope_version"] = scope.version l["otel_scope_schema_url"] = scope.schemaURL } + if settings.EnableTypeAndUnitLabels { + unitNamer := otlptranslator.UnitNamer{UTF8Allowed: settings.AllowUTF8} + if metadata.Type != prompb.MetricMetadata_UNKNOWN { + l["__type__"] = strings.ToLower(metadata.Type.String()) + } + if metadata.Unit != "" { + l["__unit__"] = unitNamer.Build(metadata.Unit) + } + } + // Map service.name + service.namespace to job. if haveServiceName { val := serviceName.AsString() @@ -219,7 +246,11 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope s } // internal labels should be maintained. if len(name) <= 4 || name[:2] != "__" || name[len(name)-2:] != "__" { - name = labelNamer.Build(name) + var err error + name, err = labelNamer.Build(name) + if err != nil { + return nil, err + } } l[name] = extras[i+1] } @@ -229,7 +260,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope s labels = append(labels, prompb.Label{Name: k, Value: v}) } - return labels + return labels, nil } func aggregationTemporality(metric pmetric.Metric) (pmetric.AggregationTemporality, bool, error) { @@ -255,7 +286,7 @@ func aggregationTemporality(metric pmetric.Metric) (pmetric.AggregationTemporali // However, work is under way to resolve this shortcoming through a feature called native histograms custom buckets: // https://github.com/prometheus/prometheus/issues/13485. 
func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice, - resource pcommon.Resource, settings Settings, baseName string, scope scope, + resource pcommon.Resource, settings Settings, metadata prompb.MetricMetadata, scope scope, ) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { @@ -264,7 +295,10 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo pt := dataPoints.At(x) timestamp := convertTimeStamp(pt.Timestamp()) - baseLabels := createAttributes(resource, pt.Attributes(), scope, settings, nil, false) + baseLabels, err := createAttributes(resource, pt.Attributes(), scope, settings, nil, false, metadata) + if err != nil { + return err + } // If the sum is unset, it indicates the _sum metric point should be // omitted @@ -278,7 +312,7 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo sum.Value = math.Float64frombits(value.StaleNaN) } - sumlabels := createLabels(baseName+sumStr, baseLabels) + sumlabels := createLabels(metadata.MetricFamilyName+sumStr, baseLabels) c.addSample(sum, sumlabels) } @@ -291,7 +325,7 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo count.Value = math.Float64frombits(value.StaleNaN) } - countlabels := createLabels(baseName+countStr, baseLabels) + countlabels := createLabels(metadata.MetricFamilyName+countStr, baseLabels) c.addSample(count, countlabels) // cumulative count for conversion to cumulative histogram @@ -315,7 +349,7 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo bucket.Value = math.Float64frombits(value.StaleNaN) } boundStr := strconv.FormatFloat(bound, 'f', -1, 64) - labels := createLabels(baseName+bucketStr, baseLabels, leStr, boundStr) + labels := createLabels(metadata.MetricFamilyName+bucketStr, baseLabels, leStr, boundStr) ts := c.addSample(bucket, labels) bucketBounds = append(bucketBounds, bucketBoundsData{ts: ts, bound: bound}) @@ -329,19 +363,13 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo } else { infBucket.Value = float64(pt.Count()) } - infLabels := createLabels(baseName+bucketStr, baseLabels, leStr, pInfStr) + infLabels := createLabels(metadata.MetricFamilyName+bucketStr, baseLabels, leStr, pInfStr) ts := c.addSample(infBucket, infLabels) bucketBounds = append(bucketBounds, bucketBoundsData{ts: ts, bound: math.Inf(1)}) if err := c.addExemplars(ctx, pt, bucketBounds); err != nil { return err } - - startTimestamp := pt.StartTimestamp() - if settings.ExportCreatedMetric && startTimestamp != 0 { - labels := createLabels(baseName+createdSuffix, baseLabels) - c.addTimeSeriesIfNeeded(labels, startTimestamp, pt.Timestamp()) - } } return nil @@ -465,7 +493,7 @@ func findMinAndMaxTimestamps(metric pmetric.Metric, minTimestamp, maxTimestamp p } func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource, - settings Settings, baseName string, scope scope, + settings Settings, metadata prompb.MetricMetadata, scope scope, ) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { @@ -474,7 +502,10 @@ func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoin pt := dataPoints.At(x) timestamp := convertTimeStamp(pt.Timestamp()) - baseLabels := createAttributes(resource, pt.Attributes(), scope, settings, nil, false) + baseLabels, err := 
createAttributes(resource, pt.Attributes(), scope, settings, nil, false, metadata) + if err != nil { + return err + } // treat sum as a sample in an individual TimeSeries sum := &prompb.Sample{ @@ -485,7 +516,7 @@ func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoin sum.Value = math.Float64frombits(value.StaleNaN) } // sum and count of the summary should append suffix to baseName - sumlabels := createLabels(baseName+sumStr, baseLabels) + sumlabels := createLabels(metadata.MetricFamilyName+sumStr, baseLabels) c.addSample(sum, sumlabels) // treat count as a sample in an individual TimeSeries @@ -496,7 +527,7 @@ func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoin if pt.Flags().NoRecordedValue() { count.Value = math.Float64frombits(value.StaleNaN) } - countlabels := createLabels(baseName+countStr, baseLabels) + countlabels := createLabels(metadata.MetricFamilyName+countStr, baseLabels) c.addSample(count, countlabels) // process each percentile/quantile @@ -510,15 +541,9 @@ func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoin quantile.Value = math.Float64frombits(value.StaleNaN) } percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64) - qtlabels := createLabels(baseName, baseLabels, quantileStr, percentileStr) + qtlabels := createLabels(metadata.MetricFamilyName, baseLabels, quantileStr, percentileStr) c.addSample(quantile, qtlabels) } - - startTimestamp := pt.StartTimestamp() - if settings.ExportCreatedMetric && startTimestamp != 0 { - createdLabels := createLabels(baseName+createdSuffix, baseLabels) - c.addTimeSeriesIfNeeded(createdLabels, startTimestamp, pt.Timestamp()) - } } return nil @@ -542,6 +567,20 @@ func createLabels(name string, baseLabels []prompb.Label, extras ...string) []pr return labels } +// addTypeAndUnitLabels appends type and unit labels to the given labels slice. +func addTypeAndUnitLabels(labels []prompb.Label, metadata prompb.MetricMetadata, settings Settings) []prompb.Label { + unitNamer := otlptranslator.UnitNamer{UTF8Allowed: settings.AllowUTF8} + + labels = slices.DeleteFunc(labels, func(l prompb.Label) bool { + return l.Name == "__type__" || l.Name == "__unit__" + }) + + labels = append(labels, prompb.Label{Name: "__type__", Value: strings.ToLower(metadata.Type.String())}) + labels = append(labels, prompb.Label{Name: "__unit__", Value: unitNamer.Build(metadata.Unit)}) + + return labels +} + // getOrCreateTimeSeries returns the time series corresponding to the label set if existent, and false. // Otherwise it creates a new one and returns that, and true. func (c *PrometheusConverter) getOrCreateTimeSeries(lbls []prompb.Label) (*prompb.TimeSeries, bool) { @@ -577,26 +616,10 @@ func (c *PrometheusConverter) getOrCreateTimeSeries(lbls []prompb.Label) (*promp return ts, true } -// addTimeSeriesIfNeeded adds a corresponding time series if it doesn't already exist. -// If the time series doesn't already exist, it gets added with startTimestamp for its value and timestamp for its timestamp, -// both converted to milliseconds. -func (c *PrometheusConverter) addTimeSeriesIfNeeded(lbls []prompb.Label, startTimestamp, timestamp pcommon.Timestamp) { - ts, created := c.getOrCreateTimeSeries(lbls) - if created { - ts.Samples = []prompb.Sample{ - { - // convert ns to ms - Value: float64(convertTimeStamp(startTimestamp)), - Timestamp: convertTimeStamp(timestamp), - }, - } - } -} - // addResourceTargetInfo converts the resource to the target info metric. 
-func addResourceTargetInfo(resource pcommon.Resource, settings Settings, earliestTimestamp, latestTimestamp time.Time, converter *PrometheusConverter) { +func addResourceTargetInfo(resource pcommon.Resource, settings Settings, earliestTimestamp, latestTimestamp time.Time, converter *PrometheusConverter) error { if settings.DisableTargetInfo { - return + return nil } attributes := resource.Attributes() @@ -614,7 +637,7 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, earlies } if nonIdentifyingAttrsCount == 0 { // If we only have job + instance, then target_info isn't useful, so don't add it. - return + return nil } name := targetMetricName @@ -627,7 +650,10 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, earlies // Do not pass identifying attributes as ignoreAttrs below. identifyingAttrs = nil } - labels := createAttributes(resource, attributes, scope{}, settings, identifyingAttrs, false, model.MetricNameLabel, name) + labels, err := createAttributes(resource, attributes, scope{}, settings, identifyingAttrs, false, prompb.MetricMetadata{}, model.MetricNameLabel, name) + if err != nil { + return err + } haveIdentifier := false for _, l := range labels { if l.Name == model.JobLabel || l.Name == model.InstanceLabel { @@ -638,7 +664,7 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, earlies if !haveIdentifier { // We need at least one identifying label to generate target_info. - return + return nil } // Generate target_info samples starting at earliestTimestamp and ending at latestTimestamp, @@ -656,12 +682,11 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, earlies Timestamp: timestamp.UnixMilli(), }) } - if len(ts.Samples) == 0 || ts.Samples[len(ts.Samples)-1].Timestamp < latestTimestamp.UnixMilli() { - ts.Samples = append(ts.Samples, prompb.Sample{ - Value: float64(1), - Timestamp: latestTimestamp.UnixMilli(), - }) - } + ts.Samples = append(ts.Samples, prompb.Sample{ + Value: float64(1), + Timestamp: latestTimestamp.UnixMilli(), + }) + return nil } // convertTimeStamp converts OTLP timestamp in ns to timestamp in ms. diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go index 855e122213a..f4199fd1c2a 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go @@ -36,8 +36,7 @@ const defaultZeroThreshold = 1e-128 // addExponentialHistogramDataPoints adds OTel exponential histogram data points to the corresponding time series // as native histogram samples. 
func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Context, dataPoints pmetric.ExponentialHistogramDataPointSlice, - resource pcommon.Resource, settings Settings, promName string, temporality pmetric.AggregationTemporality, - scope scope, + resource pcommon.Resource, settings Settings, metadata prompb.MetricMetadata, temporality pmetric.AggregationTemporality, scope scope, ) (annotations.Annotations, error) { var annots annotations.Annotations for x := 0; x < dataPoints.Len(); x++ { @@ -53,16 +52,20 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont return annots, err } - lbls := createAttributes( + lbls, err := createAttributes( resource, pt.Attributes(), scope, settings, nil, true, + metadata, model.MetricNameLabel, - promName, + metadata.MetricFamilyName, ) + if err != nil { + return nil, err + } ts, _ := c.getOrCreateTimeSeries(lbls) ts.Histograms = append(ts.Histograms, histogram) @@ -253,8 +256,7 @@ func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjust } func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice, - resource pcommon.Resource, settings Settings, promName string, temporality pmetric.AggregationTemporality, - scope scope, + resource pcommon.Resource, settings Settings, metadata prompb.MetricMetadata, temporality pmetric.AggregationTemporality, scope scope, ) (annotations.Annotations, error) { var annots annotations.Annotations @@ -271,16 +273,20 @@ func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Co return annots, err } - lbls := createAttributes( + lbls, err := createAttributes( resource, pt.Attributes(), scope, settings, nil, true, + metadata, model.MetricNameLabel, - promName, + metadata.MetricFamilyName, ) + if err != nil { + return nil, err + } ts, _ := c.getOrCreateTimeSeries(lbls) ts.Histograms = append(ts.Histograms, histogram) diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index 7358cc08202..7de00154a61 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -43,7 +43,6 @@ type Settings struct { Namespace string ExternalLabels map[string]string DisableTargetInfo bool - ExportCreatedMetric bool AddMetricSuffixes bool AllowUTF8 bool PromoteResourceAttributes *PromoteResourceAttributes @@ -53,7 +52,8 @@ type Settings struct { // LookbackDelta is the PromQL engine lookback delta. LookbackDelta time.Duration // PromoteScopeMetadata controls whether to promote OTel scope metadata to metric labels. - PromoteScopeMetadata bool + PromoteScopeMetadata bool + EnableTypeAndUnitLabels bool } // PrometheusConverter converts from OTel write format to Prometheus remote write format. 
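[Reviewer note] The Settings change above adds EnableTypeAndUnitLabels (and drops ExportCreatedMetric), and the FromMetrics hunk below now builds a prompb.MetricMetadata per metric and threads it through the add*DataPoints calls instead of a bare promName string. When the flag is on, createAttributes derives __type__ and __unit__ labels from that metadata. A minimal sketch of the derivation, assuming the vendored prompb and otlptranslator packages compile as in this patch; the typeAndUnitLabels helper itself is hypothetical:

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/otlptranslator"
	"github.com/prometheus/prometheus/prompb"
)

// typeAndUnitLabels mirrors the logic added to createAttributes: it returns
// the __type__ and __unit__ label values for a metric's metadata.
func typeAndUnitLabels(metadata prompb.MetricMetadata, allowUTF8 bool) map[string]string {
	l := map[string]string{}
	unitNamer := otlptranslator.UnitNamer{UTF8Allowed: allowUTF8}
	if metadata.Type != prompb.MetricMetadata_UNKNOWN {
		// e.g. prompb.MetricMetadata_COUNTER -> "counter"
		l["__type__"] = strings.ToLower(metadata.Type.String())
	}
	if metadata.Unit != "" {
		// The unit namer may normalize the unit, e.g. "s" -> "seconds".
		l["__unit__"] = unitNamer.Build(metadata.Unit)
	}
	return l
}

func main() {
	md := prompb.MetricMetadata{
		Type:             prompb.MetricMetadata_COUNTER,
		MetricFamilyName: "http_requests_total",
		Unit:             "s",
	}
	fmt.Println(typeAndUnitLabels(md, false))
}

Threading the full metadata struct rather than just the name is also what lets the histogram, summary, and number-data-point paths below reuse metadata.MetricFamilyName for their _sum/_count/_bucket series.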
@@ -170,13 +170,18 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric continue } - promName := namer.Build(TranslatorMetricFromOtelMetric(metric)) - c.metadata = append(c.metadata, prompb.MetricMetadata{ + promName, err := namer.Build(TranslatorMetricFromOtelMetric(metric)) + if err != nil { + errs = multierr.Append(errs, err) + continue + } + metadata := prompb.MetricMetadata{ Type: otelMetricTypeToPromMetricType(metric), MetricFamilyName: promName, Help: metric.Description(), Unit: metric.Unit(), - }) + } + c.metadata = append(c.metadata, metadata) // handle individual metrics based on type //exhaustive:enforce @@ -187,7 +192,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) break } - if err := c.addGaugeNumberDataPoints(ctx, dataPoints, resource, settings, promName, scope); err != nil { + if err := c.addGaugeNumberDataPoints(ctx, dataPoints, resource, settings, metadata, scope); err != nil { errs = multierr.Append(errs, err) if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return @@ -199,7 +204,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) break } - if err := c.addSumNumberDataPoints(ctx, dataPoints, resource, metric, settings, promName, scope); err != nil { + if err := c.addSumNumberDataPoints(ctx, dataPoints, resource, settings, metadata, scope); err != nil { errs = multierr.Append(errs, err) if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return @@ -213,7 +218,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric } if settings.ConvertHistogramsToNHCB { ws, err := c.addCustomBucketsHistogramDataPoints( - ctx, dataPoints, resource, settings, promName, temporality, scope, + ctx, dataPoints, resource, settings, metadata, temporality, scope, ) annots.Merge(ws) if err != nil { @@ -223,7 +228,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric } } } else { - if err := c.addHistogramDataPoints(ctx, dataPoints, resource, settings, promName, scope); err != nil { + if err := c.addHistogramDataPoints(ctx, dataPoints, resource, settings, metadata, scope); err != nil { errs = multierr.Append(errs, err) if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return @@ -241,7 +246,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric dataPoints, resource, settings, - promName, + metadata, temporality, scope, ) @@ -258,7 +263,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) break } - if err := c.addSummaryDataPoints(ctx, dataPoints, resource, settings, promName, scope); err != nil { + if err := c.addSummaryDataPoints(ctx, dataPoints, resource, settings, metadata, scope); err != nil { errs = multierr.Append(errs, err) if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return @@ -272,7 +277,10 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric if earliestTimestamp < pcommon.Timestamp(math.MaxUint64) { // We have at least one metric sample for this resource. // Generate a corresponding target_info series. 
- addResourceTargetInfo(resource, settings, earliestTimestamp.AsTime(), latestTimestamp.AsTime(), c) + err := addResourceTargetInfo(resource, settings, earliestTimestamp.AsTime(), latestTimestamp.AsTime(), c) + if err != nil { + errs = multierr.Append(errs, err) + } } } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go index df25e17be07..849a73d9873 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go @@ -29,7 +29,7 @@ import ( ) func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice, - resource pcommon.Resource, settings Settings, name string, scope scope, + resource pcommon.Resource, settings Settings, metadata prompb.MetricMetadata, scope scope, ) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { @@ -37,16 +37,20 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data } pt := dataPoints.At(x) - labels := createAttributes( + labels, err := createAttributes( resource, pt.Attributes(), scope, settings, nil, true, + metadata, model.MetricNameLabel, - name, + metadata.MetricFamilyName, ) + if err != nil { + return err + } sample := &prompb.Sample{ // convert ns to ms Timestamp: convertTimeStamp(pt.Timestamp()), @@ -60,6 +64,7 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data if pt.Flags().NoRecordedValue() { sample.Value = math.Float64frombits(value.StaleNaN) } + c.addSample(sample, labels) } @@ -67,7 +72,7 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data } func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice, - resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string, scope scope, + resource pcommon.Resource, settings Settings, metadata prompb.MetricMetadata, scope scope, ) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { @@ -75,16 +80,20 @@ func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPo } pt := dataPoints.At(x) - lbls := createAttributes( + lbls, err := createAttributes( resource, pt.Attributes(), scope, settings, nil, true, + metadata, model.MetricNameLabel, - name, + metadata.MetricFamilyName, ) + if err != nil { + return err + } sample := &prompb.Sample{ // convert ns to ms Timestamp: convertTimeStamp(pt.Timestamp()), @@ -98,6 +107,7 @@ func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPo if pt.Flags().NoRecordedValue() { sample.Value = math.Float64frombits(value.StaleNaN) } + ts := c.addSample(sample, lbls) if ts != nil { exemplars, err := getPromExemplars[pmetric.NumberDataPoint](ctx, &c.everyN, pt) @@ -106,24 +116,6 @@ func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPo } ts.Exemplars = append(ts.Exemplars, exemplars...) 
} - - // add created time series if needed - if settings.ExportCreatedMetric && metric.Sum().IsMonotonic() { - startTimestamp := pt.StartTimestamp() - if startTimestamp == 0 { - return nil - } - - createdLabels := make([]prompb.Label, len(lbls)) - copy(createdLabels, lbls) - for i, l := range createdLabels { - if l.Name == model.MetricNameLabel { - createdLabels[i].Value = name + createdSuffix - break - } - } - c.addTimeSeriesIfNeeded(createdLabels, startTimestamp, pt.Timestamp()) - } } return nil diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/read.go b/vendor/github.com/prometheus/prometheus/storage/remote/read.go index 881b5c28d12..e21d1538f50 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/read.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/read.go @@ -210,19 +210,19 @@ func (q querier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, []s } // LabelValues implements storage.Querier and is a noop. -func (q *querier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (*querier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { // TODO: Implement: https://github.com/prometheus/prometheus/issues/3351 return nil, nil, errors.New("not implemented") } // LabelNames implements storage.Querier and is a noop. -func (q *querier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (*querier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { // TODO: Implement: https://github.com/prometheus/prometheus/issues/3351 return nil, nil, errors.New("not implemented") } // Close implements storage.Querier and is a noop. -func (q *querier) Close() error { +func (*querier) Close() error { return nil } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/storage.go b/vendor/github.com/prometheus/prometheus/storage/remote/storage.go index ba6d100bdff..d22bbacae45 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/storage.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/storage.go @@ -145,7 +145,7 @@ func (s *Storage) ApplyConfig(conf *config.Config) error { } // StartTime implements the Storage interface. -func (s *Storage) StartTime() (int64, error) { +func (*Storage) StartTime() (int64, error) { return int64(model.Latest), nil } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write.go b/vendor/github.com/prometheus/prometheus/storage/remote/write.go index f5c998874b9..3deacfb664b 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write.go @@ -233,7 +233,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error { } // Appender implements storage.Storage. 
-func (rws *WriteStorage) Appender(_ context.Context) storage.Appender { +func (rws *WriteStorage) Appender(context.Context) storage.Appender { return ×tampTracker{ writeStorage: rws, highestRecvTimestamp: rws.highestTimestamp, @@ -302,7 +302,7 @@ func (t *timestampTracker) Append(_ storage.SeriesRef, _ labels.Labels, ts int64 return 0, nil } -func (t *timestampTracker) AppendExemplar(_ storage.SeriesRef, _ labels.Labels, _ exemplar.Exemplar) (storage.SeriesRef, error) { +func (t *timestampTracker) AppendExemplar(storage.SeriesRef, labels.Labels, exemplar.Exemplar) (storage.SeriesRef, error) { t.exemplars++ return 0, nil } @@ -335,7 +335,7 @@ func (t *timestampTracker) AppendHistogramCTZeroSample(_ storage.SeriesRef, _ la return 0, nil } -func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) { +func (*timestampTracker) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) { // TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write. // UpdateMetadata is no-op for remote write (where timestampTracker is being used) for now. return 0, nil diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go index 29ba1b1bd91..14e4ac72983 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go @@ -92,7 +92,7 @@ func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable return h } -func (h *writeHandler) parseProtoMsg(contentType string) (config.RemoteWriteProtoMsg, error) { +func (*writeHandler) parseProtoMsg(contentType string) (config.RemoteWriteProtoMsg, error) { contentType = strings.TrimSpace(contentType) parts := strings.Split(contentType, ";") @@ -142,6 +142,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { }()) h.logger.Error("Error decoding remote write request", "err", err) http.Error(w, err.Error(), http.StatusUnsupportedMediaType) + return } enc := r.Header.Get("Content-Encoding") @@ -513,7 +514,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * // handleHistogramZeroSample appends CT as a zero-value sample with CT value as the sample timestamp. // It doesn't return errors in case of out of order CT. -func (h *writeHandler) handleHistogramZeroSample(app storage.Appender, ref storage.SeriesRef, l labels.Labels, hist writev2.Histogram, ct int64) (storage.SeriesRef, error) { +func (*writeHandler) handleHistogramZeroSample(app storage.Appender, ref storage.SeriesRef, l labels.Labels, hist writev2.Histogram, ct int64) (storage.SeriesRef, error) { var err error if hist.IsFloatHistogram() { ref, err = app.AppendHistogramCTZeroSample(ref, l, hist.Timestamp, ct, nil, hist.ToFloatHistogram()) @@ -533,6 +534,8 @@ type OTLPOptions struct { // LookbackDelta is the query lookback delta. // Used to calculate the target_info sample timestamp interval. LookbackDelta time.Duration + // Add type and unit labels to the metrics. 
+ EnableTypeAndUnitLabels bool } // NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and @@ -548,9 +551,10 @@ func NewOTLPWriteHandler(logger *slog.Logger, _ prometheus.Registerer, appendabl logger: logger, appendable: appendable, }, - config: configFunc, - allowDeltaTemporality: opts.NativeDelta, - lookbackDelta: opts.LookbackDelta, + config: configFunc, + allowDeltaTemporality: opts.NativeDelta, + lookbackDelta: opts.LookbackDelta, + enableTypeAndUnitLabels: opts.EnableTypeAndUnitLabels, } wh := &otlpWriteHandler{logger: logger, defaultConsumer: ex} @@ -585,9 +589,10 @@ func NewOTLPWriteHandler(logger *slog.Logger, _ prometheus.Registerer, appendabl type rwExporter struct { *writeHandler - config func() config.Config - allowDeltaTemporality bool - lookbackDelta time.Duration + config func() config.Config + allowDeltaTemporality bool + lookbackDelta time.Duration + enableTypeAndUnitLabels bool } func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { @@ -601,9 +606,10 @@ func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) er PromoteResourceAttributes: otlptranslator.NewPromoteResourceAttributes(otlpCfg), KeepIdentifyingResourceAttributes: otlpCfg.KeepIdentifyingResourceAttributes, ConvertHistogramsToNHCB: otlpCfg.ConvertHistogramsToNHCB, + PromoteScopeMetadata: otlpCfg.PromoteScopeMetadata, AllowDeltaTemporality: rw.allowDeltaTemporality, LookbackDelta: rw.lookbackDelta, - PromoteScopeMetadata: otlpCfg.PromoteScopeMetadata, + EnableTypeAndUnitLabels: rw.enableTypeAndUnitLabels, }) if err != nil { rw.logger.Warn("Error translating OTLP metrics to Prometheus write request", "err", err) @@ -620,7 +626,7 @@ func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) er return err } -func (rw *rwExporter) Capabilities() consumer.Capabilities { +func (*rwExporter) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: false} } diff --git a/vendor/github.com/prometheus/prometheus/storage/series.go b/vendor/github.com/prometheus/prometheus/storage/series.go index e61b2259370..2fff56785a4 100644 --- a/vendor/github.com/prometheus/prometheus/storage/series.go +++ b/vendor/github.com/prometheus/prometheus/storage/series.go @@ -65,7 +65,7 @@ func NewListChunkSeriesFromSamples(lset labels.Labels, samples ...[]chunks.Sampl if err != nil { return &ChunkSeriesEntry{ Lset: lset, - ChunkIteratorFn: func(_ chunks.Iterator) chunks.Iterator { + ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator { return errChunksIterator{err: err} }, } @@ -169,7 +169,7 @@ func (it *listSeriesIterator) Seek(t int64) chunkenc.ValueType { return it.samples.Get(it.idx).Type() } -func (it *listSeriesIterator) Err() error { return nil } +func (*listSeriesIterator) Err() error { return nil } type listSeriesIteratorWithCopy struct { *listSeriesIterator @@ -223,7 +223,7 @@ func (it *listChunkSeriesIterator) Next() bool { return it.idx < len(it.chks) } -func (it *listChunkSeriesIterator) Err() error { return nil } +func (*listChunkSeriesIterator) Err() error { return nil } type chunkSetToSeriesSet struct { ChunkSeriesSet @@ -432,9 +432,9 @@ type errChunksIterator struct { err error } -func (e errChunksIterator) At() chunks.Meta { return chunks.Meta{} } -func (e errChunksIterator) Next() bool { return false } -func (e errChunksIterator) Err() error { return e.err } +func (errChunksIterator) At() chunks.Meta { return chunks.Meta{} } +func (errChunksIterator) Next() bool { return false } +func (e 
errChunksIterator) Err() error { return e.err } // ExpandSamples iterates over all samples in the iterator, buffering all in slice. // Optionally it takes samples constructor, useful when you want to compare sample slices with different diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go index 7082f34c3f4..1520619f4b4 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go @@ -223,17 +223,17 @@ type mockSeriesIterator struct { currIndex int } -func (it *mockSeriesIterator) Seek(int64) ValueType { return ValNone } +func (*mockSeriesIterator) Seek(int64) ValueType { return ValNone } func (it *mockSeriesIterator) At() (int64, float64) { return it.timeStamps[it.currIndex], it.values[it.currIndex] } -func (it *mockSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) { +func (*mockSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) { return math.MinInt64, nil } -func (it *mockSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { +func (*mockSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { return math.MinInt64, nil } @@ -249,7 +249,7 @@ func (it *mockSeriesIterator) Next() ValueType { return ValNone } -func (it *mockSeriesIterator) Err() error { return nil } +func (*mockSeriesIterator) Err() error { return nil } // NewNopIterator returns a new chunk iterator that does not hold any data. func NewNopIterator() Iterator { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go index e5ad4028bbb..564b312db54 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go @@ -58,7 +58,7 @@ type xorValue struct { } // Encoding returns the encoding type. -func (c *FloatHistogramChunk) Encoding() Encoding { +func (*FloatHistogramChunk) Encoding() Encoding { return EncFloatHistogram } @@ -215,7 +215,7 @@ func (a *FloatHistogramAppender) NumSamples() int { // Append implements Appender. This implementation panics because normal float // samples must never be appended to a histogram chunk. -func (a *FloatHistogramAppender) Append(int64, float64) { +func (*FloatHistogramAppender) Append(int64, float64) { panic("appended a float sample to a histogram chunk") } @@ -343,7 +343,7 @@ func (a *FloatHistogramAppender) appendable(h *histogram.FloatHistogram) ( // original deltas to a new set of deltas to match a new span layout that adds // buckets, we simply need to generate a list of inserts. // -// Note: Within expandSpansForward we don't have to worry about the changes to the +// Note: Within expandFloatSpansAndBuckets we don't have to worry about the changes to the // spans themselves, thanks to the iterators we get to work with the more useful // bucket indices (which of course directly correspond to the buckets we have to // adjust). @@ -378,6 +378,48 @@ func expandFloatSpansAndBuckets(a, b []histogram.Span, aBuckets []xorValue, bBuc bCount = bBuckets[bCountIdx] } + // addInsert updates the current Insert with a new insert at the given + // bucket index (otherIdx). + addInsert := func(inserts []Insert, insert *Insert, otherIdx int) []Insert { + if insert.num == 0 { + // First insert. 
+ insert.bucketIdx = otherIdx + } else if insert.bucketIdx+insert.num != otherIdx { + // Insert is not continuous from previous insert. + inserts = append(inserts, *insert) + insert.num = 0 + insert.bucketIdx = otherIdx + } + insert.num++ + return inserts + } + + advanceA := func() { + if aInter.num > 0 { + aInserts = append(aInserts, aInter) + aInter.num = 0 + } + aIdx, aOK = ai.Next() + aInter.pos++ + aCountIdx++ + if aOK { + aCount = aBuckets[aCountIdx].value + } + } + + advanceB := func() { + if bInter.num > 0 { + bInserts = append(bInserts, bInter) + bInter.num = 0 + } + bIdx, bOK = bi.Next() + bInter.pos++ + bCountIdx++ + if bOK { + bCount = bBuckets[bCountIdx] + } + } + loop: for { switch { @@ -389,105 +431,37 @@ loop: return nil, nil, false } - // Finish WIP insert for a and reset. - if aInter.num > 0 { - aInserts = append(aInserts, aInter) - aInter.num = 0 - } - - // Finish WIP insert for b and reset. - if bInter.num > 0 { - bInserts = append(bInserts, bInter) - bInter.num = 0 - } - - aIdx, aOK = ai.Next() - bIdx, bOK = bi.Next() - aInter.pos++ // Advance potential insert position. - aCountIdx++ // Advance absolute bucket count index for a. - if aOK { - aCount = aBuckets[aCountIdx].value - } - bInter.pos++ // Advance potential insert position. - bCountIdx++ // Advance absolute bucket count index for b. - if bOK { - bCount = bBuckets[bCountIdx] - } + advanceA() + advanceB() continue case aIdx < bIdx: // b misses a bucket index that is in a. // This is ok if the count in a is 0, in which case we make a note to // fill in the bucket in b and advance a. if aCount == 0 { - bInter.num++ // Mark that we need to insert a bucket in b. - bInter.bucketIdx = aIdx - // Advance a - if aInter.num > 0 { - aInserts = append(aInserts, aInter) - aInter.num = 0 - } - aIdx, aOK = ai.Next() - aInter.pos++ - aCountIdx++ - if aOK { - aCount = aBuckets[aCountIdx].value - } + bInserts = addInsert(bInserts, &bInter, aIdx) + advanceA() continue } // Otherwise we are missing a bucket that was in use in a, which is a reset. return nil, nil, false case aIdx > bIdx: // a misses a value that is in b. Forward b and recompare. - aInter.num++ - bInter.bucketIdx = bIdx - // Advance b - if bInter.num > 0 { - bInserts = append(bInserts, bInter) - bInter.num = 0 - } - bIdx, bOK = bi.Next() - bInter.pos++ - bCountIdx++ - if bOK { - bCount = bBuckets[bCountIdx] - } + aInserts = addInsert(aInserts, &aInter, bIdx) + advanceB() } case aOK && !bOK: // b misses a value that is in a. // This is ok if the count in a is 0, in which case we make a note to // fill in the bucket in b and advance a. if aCount == 0 { - bInter.num++ - bInter.bucketIdx = aIdx - // Advance a - if aInter.num > 0 { - aInserts = append(aInserts, aInter) - aInter.num = 0 - } - aIdx, aOK = ai.Next() - aInter.pos++ // Advance potential insert position. - // Update absolute bucket counts for a. - aCountIdx++ - if aOK { - aCount = aBuckets[aCountIdx].value - } + bInserts = addInsert(bInserts, &bInter, aIdx) + advanceA() continue } // Otherwise we are missing a bucket that was in use in a, which is a reset. return nil, nil, false case !aOK && bOK: // a misses a value that is in b. Forward b and recompare. - aInter.num++ - bInter.bucketIdx = bIdx - // Advance b - if bInter.num > 0 { - bInserts = append(bInserts, bInter) - bInter.num = 0 - } - bIdx, bOK = bi.Next() - bInter.pos++ // Advance potential insert position. - // Update absolute bucket counts for b. 
- bCountIdx++ - if bOK { - bCount = bBuckets[bCountIdx] - } + aInserts = addInsert(aInserts, &aInter, bIdx) + advanceB() default: // Both iterators ran out. We're done. if aInter.num > 0 { aInserts = append(aInserts, aInter) @@ -714,7 +688,7 @@ func (a *FloatHistogramAppender) recode( // recodeHistogram converts the current histogram (in-place) to accommodate an expansion of the set of // (positive and/or negative) buckets used. -func (a *FloatHistogramAppender) recodeHistogram( +func (*FloatHistogramAppender) recodeHistogram( fh *histogram.FloatHistogram, pBackwardInter, nBackwardInter []Insert, ) { @@ -728,7 +702,7 @@ func (a *FloatHistogramAppender) recodeHistogram( } } -func (a *FloatHistogramAppender) AppendHistogram(*HistogramAppender, int64, *histogram.Histogram, bool) (Chunk, bool, Appender, error) { +func (*FloatHistogramAppender) AppendHistogram(*HistogramAppender, int64, *histogram.Histogram, bool) (Chunk, bool, Appender, error) { panic("appended a histogram sample to a float histogram chunk") } @@ -782,7 +756,7 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend // The histogram needs to be expanded to have the extra empty buckets // of the chunk. if len(pForwardInserts) == 0 && len(nForwardInserts) == 0 { - // No new chunks from the histogram, so the spans of the appender can accommodate the new buckets. + // No new buckets from the histogram, so the spans of the appender can accommodate the new buckets. // However we need to make a copy in case the input is sharing spans from an iterator. h.PositiveSpans = make([]histogram.Span, len(a.pSpans)) copy(h.PositiveSpans, a.pSpans) @@ -898,11 +872,11 @@ func (it *floatHistogramIterator) Seek(t int64) ValueType { return ValFloatHistogram } -func (it *floatHistogramIterator) At() (int64, float64) { +func (*floatHistogramIterator) At() (int64, float64) { panic("cannot call floatHistogramIterator.At") } -func (it *floatHistogramIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) { +func (*floatHistogramIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) { panic("cannot call floatHistogramIterator.AtHistogram") } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go index 0f54eb69288..6039fef0e04 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go @@ -51,7 +51,7 @@ func (c *HistogramChunk) Reset(stream []byte) { } // Encoding returns the encoding type. -func (c *HistogramChunk) Encoding() Encoding { +func (*HistogramChunk) Encoding() Encoding { return EncHistogram } @@ -234,7 +234,7 @@ func (a *HistogramAppender) NumSamples() int { // Append implements Appender. This implementation panics because normal float // samples must never be appended to a histogram chunk. -func (a *HistogramAppender) Append(int64, float64) { +func (*HistogramAppender) Append(int64, float64) { panic("appended a float sample to a histogram chunk") } @@ -374,7 +374,7 @@ func (a *HistogramAppender) appendable(h *histogram.Histogram) ( // original deltas to a new set of deltas to match a new span layout that adds // buckets, we simply need to generate a list of inserts. 
// -// Note: Within expandSpansForward we don't have to worry about the changes to the +// Note: Within expandIntSpansAndBuckets we don't have to worry about the changes to the // spans themselves, thanks to the iterators we get to work with the more useful // bucket indices (which of course directly correspond to the buckets we have to // adjust). @@ -409,6 +409,48 @@ func expandIntSpansAndBuckets(a, b []histogram.Span, aBuckets, bBuckets []int64) bCount = bBuckets[bCountIdx] } + // addInsert updates the current Insert with a new insert at the given + // bucket index (otherIdx). + addInsert := func(inserts []Insert, insert *Insert, otherIdx int) []Insert { + if insert.num == 0 { + // First insert. + insert.bucketIdx = otherIdx + } else if insert.bucketIdx+insert.num != otherIdx { + // Insert is not continuous from previous insert. + inserts = append(inserts, *insert) + insert.num = 0 + insert.bucketIdx = otherIdx + } + insert.num++ + return inserts + } + + advanceA := func() { + if aInter.num > 0 { + aInserts = append(aInserts, aInter) + aInter.num = 0 + } + aIdx, aOK = ai.Next() + aInter.pos++ + aCountIdx++ + if aOK { + aCount += aBuckets[aCountIdx] + } + } + + advanceB := func() { + if bInter.num > 0 { + bInserts = append(bInserts, bInter) + bInter.num = 0 + } + bIdx, bOK = bi.Next() + bInter.pos++ + bCountIdx++ + if bOK { + bCount += bBuckets[bCountIdx] + } + } + loop: for { switch { @@ -420,105 +462,37 @@ loop: return nil, nil, false } - // Finish WIP insert for a and reset. - if aInter.num > 0 { - aInserts = append(aInserts, aInter) - aInter.num = 0 - } - - // Finish WIP insert for b and reset. - if bInter.num > 0 { - bInserts = append(bInserts, bInter) - bInter.num = 0 - } - - aIdx, aOK = ai.Next() - bIdx, bOK = bi.Next() - aInter.pos++ // Advance potential insert position. - aCountIdx++ // Advance absolute bucket count index for a. - if aOK { - aCount += aBuckets[aCountIdx] - } - bInter.pos++ // Advance potential insert position. - bCountIdx++ // Advance absolute bucket count index for b. - if bOK { - bCount += bBuckets[bCountIdx] - } + advanceA() + advanceB() continue case aIdx < bIdx: // b misses a bucket index that is in a. // This is ok if the count in a is 0, in which case we make a note to // fill in the bucket in b and advance a. if aCount == 0 { - bInter.num++ // Mark that we need to insert a bucket in b. - bInter.bucketIdx = aIdx - // Advance a - if aInter.num > 0 { - aInserts = append(aInserts, aInter) - aInter.num = 0 - } - aIdx, aOK = ai.Next() - aInter.pos++ - aCountIdx++ - if aOK { - aCount += aBuckets[aCountIdx] - } + bInserts = addInsert(bInserts, &bInter, aIdx) + advanceA() continue } // Otherwise we are missing a bucket that was in use in a, which is a reset. return nil, nil, false case aIdx > bIdx: // a misses a value that is in b. Forward b and recompare. - aInter.num++ - aInter.bucketIdx = bIdx - // Advance b - if bInter.num > 0 { - bInserts = append(bInserts, bInter) - bInter.num = 0 - } - bIdx, bOK = bi.Next() - bInter.pos++ - bCountIdx++ - if bOK { - bCount += bBuckets[bCountIdx] - } + aInserts = addInsert(aInserts, &aInter, bIdx) + advanceB() } case aOK && !bOK: // b misses a value that is in a. // This is ok if the count in a is 0, in which case we make a note to // fill in the bucket in b and advance a. if aCount == 0 { - bInter.num++ - bInter.bucketIdx = aIdx - // Advance a - if aInter.num > 0 { - aInserts = append(aInserts, aInter) - aInter.num = 0 - } - aIdx, aOK = ai.Next() - aInter.pos++ // Advance potential insert position. 
- // Update absolute bucket counts for a. - aCountIdx++ - if aOK { - aCount += aBuckets[aCountIdx] - } + bInserts = addInsert(bInserts, &bInter, aIdx) + advanceA() continue } // Otherwise we are missing a bucket that was in use in a, which is a reset. return nil, nil, false case !aOK && bOK: // a misses a value that is in b. Forward b and recompare. - aInter.num++ - aInter.bucketIdx = bIdx - // Advance b - if bInter.num > 0 { - bInserts = append(bInserts, bInter) - bInter.num = 0 - } - bIdx, bOK = bi.Next() - bInter.pos++ // Advance potential insert position. - // Update absolute bucket counts for b. - bCountIdx++ - if bOK { - bCount += bBuckets[bCountIdx] - } + aInserts = addInsert(aInserts, &aInter, bIdx) + advanceB() default: // Both iterators ran out. We're done. if aInter.num > 0 { aInserts = append(aInserts, aInter) @@ -757,7 +731,7 @@ func (a *HistogramAppender) recode( // recodeHistogram converts the current histogram (in-place) to accommodate an // expansion of the set of (positive and/or negative) buckets used. -func (a *HistogramAppender) recodeHistogram( +func (*HistogramAppender) recodeHistogram( h *histogram.Histogram, pBackwardInserts, nBackwardInserts []Insert, ) { @@ -775,7 +749,7 @@ func (a *HistogramAppender) writeSumDelta(v float64) { xorWrite(a.b, v, a.sum, &a.leading, &a.trailing) } -func (a *HistogramAppender) AppendFloatHistogram(*FloatHistogramAppender, int64, *histogram.FloatHistogram, bool) (Chunk, bool, Appender, error) { +func (*HistogramAppender) AppendFloatHistogram(*FloatHistogramAppender, int64, *histogram.FloatHistogram, bool) (Chunk, bool, Appender, error) { panic("appended a float histogram sample to a histogram chunk") } @@ -823,7 +797,7 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h // The histogram needs to be expanded to have the extra empty buckets // of the chunk. if len(pForwardInserts) == 0 && len(nForwardInserts) == 0 { - // No new chunks from the histogram, so the spans of the appender can accommodate the new buckets. + // No new buckets from the histogram, so the spans of the appender can accommodate the new buckets. // However we need to make a copy in case the input is sharing spans from an iterator. h.PositiveSpans = make([]histogram.Span, len(a.pSpans)) copy(h.PositiveSpans, a.pSpans) @@ -952,7 +926,7 @@ func (it *histogramIterator) Seek(t int64) ValueType { return ValHistogram } -func (it *histogramIterator) At() (int64, float64) { +func (*histogramIterator) At() (int64, float64) { panic("cannot call histogramIterator.At") } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go index 7bb31acf00c..5ee783fd683 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go @@ -284,101 +284,12 @@ type Insert struct { bucketIdx int } -// Deprecated: expandSpansForward, use expandIntSpansAndBuckets or -// expandFloatSpansAndBuckets instead. -// expandSpansForward is left here for reference. -// expandSpansForward returns the inserts to expand the bucket spans 'a' so that -// they match the spans in 'b'. 'b' must cover the same or more buckets than -// 'a', otherwise the function will return false. 
-// -// Example: -// -// Let's say the old buckets look like this: -// -// span syntax: [offset, length] -// spans : [ 0 , 2 ] [2,1] [ 3 , 2 ] [3,1] [1,1] -// bucket idx : [0] [1] 2 3 [4] 5 6 7 [8] [9] 10 11 12 [13] 14 [15] -// raw values 6 3 3 2 4 5 1 -// deltas 6 -3 0 -1 2 1 -4 -// -// But now we introduce a new bucket layout. (Carefully chosen example where we -// have a span appended, one unchanged[*], one prepended, and two merge - in -// that order.) -// -// [*] unchanged in terms of which bucket indices they represent. but to achieve -// that, their offset needs to change if "disrupted" by spans changing ahead of -// them -// -// \/ this one is "unchanged" -// spans : [ 0 , 3 ] [1,1] [ 1 , 4 ] [ 3 , 3 ] -// bucket idx : [0] [1] [2] 3 [4] 5 [6] [7] [8] [9] 10 11 12 [13] [14] [15] -// raw values 6 3 0 3 0 0 2 4 5 0 1 -// deltas 6 -3 -3 3 -3 0 2 2 1 -5 1 -// delta mods: / \ / \ / \ -// -// Note for histograms with delta-encoded buckets: Whenever any new buckets are -// introduced, the subsequent "old" bucket needs to readjust its delta to the -// new base of 0. Thus, for the caller who wants to transform the set of -// original deltas to a new set of deltas to match a new span layout that adds -// buckets, we simply need to generate a list of inserts. -// -// Note: Within expandSpansForward we don't have to worry about the changes to the -// spans themselves, thanks to the iterators we get to work with the more useful -// bucket indices (which of course directly correspond to the buckets we have to -// adjust). -func expandSpansForward(a, b []histogram.Span) (forward []Insert, ok bool) { - ai := newBucketIterator(a) - bi := newBucketIterator(b) - - var inserts []Insert - - // When inter.num becomes > 0, this becomes a valid insert that should - // be yielded when we finish a streak of new buckets. - var inter Insert - - av, aOK := ai.Next() - bv, bOK := bi.Next() -loop: - for { - switch { - case aOK && bOK: - switch { - case av == bv: // Both have an identical value. move on! - // Finish WIP insert and reset. - if inter.num > 0 { - inserts = append(inserts, inter) - } - inter.num = 0 - av, aOK = ai.Next() - bv, bOK = bi.Next() - inter.pos++ - case av < bv: // b misses a value that is in a. - return inserts, false - case av > bv: // a misses a value that is in b. Forward b and recompare. - inter.num++ - bv, bOK = bi.Next() - } - case aOK && !bOK: // b misses a value that is in a. - return inserts, false - case !aOK && bOK: // a misses a value that is in b. Forward b and recompare. - inter.num++ - bv, bOK = bi.Next() - default: // Both iterators ran out. We're done. - if inter.num > 0 { - inserts = append(inserts, inter) - } - break loop - } - } - - return inserts, true -} - -// expandSpansBothWays is similar to expandSpansForward, but now b may also -// cover an entirely different set of buckets. The function returns the -// “forward” inserts to expand 'a' to also cover all the buckets exclusively -// covered by 'b', and it returns the “backward” inserts to expand 'b' to also -// cover all the buckets exclusively covered by 'a'. +// expandSpansBothWays is similar to expandFloatSpansAndBuckets and +// expandIntSpansAndBuckets, but now b may also cover an entirely different set +// of buckets and counter resets are ignored. The function returns the “forward” +// inserts to expand 'a' to also cover all the buckets exclusively covered by +// 'b', and it returns the “backward” inserts to expand 'b' to also cover all +// the buckets exclusively covered by 'a'. 
func expandSpansBothWays(a, b []histogram.Span) (forward, backward []Insert, mergedSpans []histogram.Span) { ai := newBucketIterator(a) bi := newBucketIterator(b) @@ -488,14 +399,24 @@ func insert[BV bucketValue](in, out []BV, inserts []Insert, deltas bool) []BV { ii int // The next insert to process. ) for i, d := range in { - if ii < len(inserts) && i == inserts[ii].pos { + if ii >= len(inserts) || i != inserts[ii].pos { + // No inserts at this position, the original delta is still valid. + out[oi] = d + oi++ + v += d + continue + } + // Process inserts. + firstInsert := true + for ii < len(inserts) && i == inserts[ii].pos { // We have an insert! // Add insert.num new delta values such that their // bucket values equate 0. When deltas==false, it means // that it is an absolute value. So we set it to 0 // directly. - if deltas { + if deltas && firstInsert { out[oi] = -v + firstInsert = false // No need to go to 0 in further inserts. } else { out[oi] = 0 } @@ -505,32 +426,30 @@ func insert[BV bucketValue](in, out []BV, inserts []Insert, deltas bool) []BV { oi++ } ii++ - - // Now save the value from the input. The delta value we - // should save is the original delta value + the last - // value of the point before the insert (to undo the - // delta that was introduced by the insert). When - // deltas==false, it means that it is an absolute value, - // so we set it directly to the value in the 'in' slice. - if deltas { - out[oi] = d + v - } else { - out[oi] = d - } - oi++ - v = d + v - continue } - // If there was no insert, the original delta is still valid. - out[oi] = d + // Now save the value from the input. The delta value we + // should save is the original delta value + the last + // value of the point before the insert (to undo the + // delta that was introduced by the insert). When + // deltas==false, it means that it is an absolute value, + // so we set it directly to the value in the 'in' slice. + if deltas { + out[oi] = d + v + } else { + out[oi] = d + } oi++ v += d } - switch ii { - case len(inserts): - // All inserts processed. Nothing more to do. - case len(inserts) - 1: - // One more insert to process at the end. + // Insert empty buckets at the end. + for ii < len(inserts) { + if inserts[ii].pos < len(in) { + panic("leftover inserts must be after the current buckets") + } + // Add insert.num new delta values such that their + // bucket values equate 0. When deltas==false, it means + // that it is an absolute value. So we set it to 0 + // directly. if deltas { out[oi] = -v } else { @@ -541,8 +460,8 @@ func insert[BV bucketValue](in, out []BV, inserts []Insert, deltas bool) []BV { out[oi] = 0 oi++ } - default: - panic("unprocessed inserts left") + ii++ + v = 0 } return out } @@ -628,7 +547,7 @@ func adjustForInserts(spans []histogram.Span, inserts []Insert) (mergedSpans []h } } for i < len(inserts) { - addBucket(inserts[i].bucketIdx) + addBucket(insertIdx) consumeInsert() } return diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go index ac75a5994bb..5a37ebfea92 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go @@ -71,7 +71,7 @@ func (c *XORChunk) Reset(stream []byte) { } // Encoding returns the encoding type. 
-func (c *XORChunk) Encoding() Encoding { +func (*XORChunk) Encoding() Encoding { return EncXOR } @@ -223,11 +223,11 @@ func (a *xorAppender) writeVDelta(v float64) { xorWrite(a.b, v, a.v, &a.leading, &a.trailing) } -func (a *xorAppender) AppendHistogram(*HistogramAppender, int64, *histogram.Histogram, bool) (Chunk, bool, Appender, error) { +func (*xorAppender) AppendHistogram(*HistogramAppender, int64, *histogram.Histogram, bool) (Chunk, bool, Appender, error) { panic("appended a histogram sample to a float chunk") } -func (a *xorAppender) AppendFloatHistogram(*FloatHistogramAppender, int64, *histogram.FloatHistogram, bool) (Chunk, bool, Appender, error) { +func (*xorAppender) AppendFloatHistogram(*FloatHistogramAppender, int64, *histogram.FloatHistogram, bool) (Chunk, bool, Appender, error) { panic("appended a float histogram sample to a float chunk") } @@ -263,11 +263,11 @@ func (it *xorIterator) At() (int64, float64) { return it.t, it.val } -func (it *xorIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) { +func (*xorIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) { panic("cannot call xorIterator.AtHistogram") } -func (it *xorIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { +func (*xorIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { panic("cannot call xorIterator.AtFloatHistogram") } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go index 876b42cb26a..89c508aa8f6 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go @@ -172,7 +172,7 @@ func (f *chunkPos) shouldCutNewFile(bytesToWrite uint64) bool { // bytesToWriteForChunk returns the number of bytes that will need to be written for the given chunk size, // including all meta data before and after the chunk data. // Head chunk format: https://github.com/prometheus/prometheus/blob/main/tsdb/docs/format/head_chunks.md#chunk -func (f *chunkPos) bytesToWriteForChunk(chkLen uint64) uint64 { +func (*chunkPos) bytesToWriteForChunk(chkLen uint64) uint64 { // Headers. 
bytes := uint64(SeriesRefSize) + 2*MintMaxtSize + ChunkEncodingSize @@ -283,16 +283,16 @@ const ( OutOfOrderMask = uint8(0b10000000) ) -func (cdm *ChunkDiskMapper) ApplyOutOfOrderMask(sourceEncoding chunkenc.Encoding) chunkenc.Encoding { +func (*ChunkDiskMapper) ApplyOutOfOrderMask(sourceEncoding chunkenc.Encoding) chunkenc.Encoding { enc := uint8(sourceEncoding) | OutOfOrderMask return chunkenc.Encoding(enc) } -func (cdm *ChunkDiskMapper) IsOutOfOrderChunk(e chunkenc.Encoding) bool { +func (*ChunkDiskMapper) IsOutOfOrderChunk(e chunkenc.Encoding) bool { return (uint8(e) & OutOfOrderMask) != 0 } -func (cdm *ChunkDiskMapper) RemoveMasks(sourceEncoding chunkenc.Encoding) chunkenc.Encoding { +func (*ChunkDiskMapper) RemoveMasks(sourceEncoding chunkenc.Encoding) chunkenc.Encoding { restored := uint8(sourceEncoding) & (^OutOfOrderMask) return chunkenc.Encoding(restored) } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index 7828fd08605..0641b757207 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -178,7 +178,7 @@ type LeveledCompactorOptions struct { type PostingsDecoderFactory func(meta *BlockMeta) index.PostingsDecoder -func DefaultPostingsDecoderFactory(_ *BlockMeta) index.PostingsDecoder { +func DefaultPostingsDecoderFactory(*BlockMeta) index.PostingsDecoder { return index.DecodePostingsRaw } @@ -761,7 +761,7 @@ type DefaultBlockPopulator struct{} // PopulateBlock fills the index and chunk writers with new data gathered as the union // of the provided blocks. It returns meta information for the new block. // It expects sorted blocks input by mint. -func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger *slog.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, postingsFunc IndexReaderPostingsFunc) (err error) { +func (DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger *slog.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, postingsFunc IndexReaderPostingsFunc) (err error) { if len(blocks) == 0 { return errors.New("cannot populate block from no readers") } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go index 0f2d8c311e5..093ec5ab27b 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go @@ -1074,6 +1074,16 @@ func (db *DB) Dir() string { return db.dir } +// BlockMetas returns the list of metadata for all blocks. 
+func (db *DB) BlockMetas() []BlockMeta { + blocks := db.Blocks() + metas := make([]BlockMeta, 0, len(blocks)) + for _, b := range blocks { + metas = append(metas, b.Meta()) + } + return metas +} + func (db *DB) run(ctx context.Context) { defer close(db.donec) @@ -1916,7 +1926,7 @@ func OverlappingBlocks(bm []BlockMeta) Overlaps { return overlapGroups } -func (db *DB) String() string { +func (*DB) String() string { return "HEAD" } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go b/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go index 7b5ac26cf1a..8ea1acf1a96 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go @@ -140,11 +140,11 @@ func (ce *CircularExemplarStorage) Appender() *CircularExemplarStorage { return ce } -func (ce *CircularExemplarStorage) ExemplarQuerier(_ context.Context) (storage.ExemplarQuerier, error) { +func (ce *CircularExemplarStorage) ExemplarQuerier(context.Context) (storage.ExemplarQuerier, error) { return ce, nil } -func (ce *CircularExemplarStorage) Querier(_ context.Context) (storage.ExemplarQuerier, error) { +func (ce *CircularExemplarStorage) Querier(context.Context) (storage.ExemplarQuerier, error) { return ce, nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir.go index 1672a92d4c9..ad039d2231c 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir.go @@ -22,6 +22,10 @@ func DirSize(dir string) (int64, error) { var size int64 err := filepath.Walk(dir, func(_ string, info os.FileInfo, err error) error { if err != nil { + // Ignore missing files that may have been deleted during the walk. + if os.IsNotExist(err) { + return nil + } return err } if !info.IsDir() { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/direct_io_unsupported.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/direct_io_unsupported.go index fb0b28fcc3c..3e3555ebf8e 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/direct_io_unsupported.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/direct_io_unsupported.go @@ -24,6 +24,6 @@ func NewBufioWriterWithSeek(f *os.File, size int) (BufWriter, error) { return &writer{bufio.NewWriterSize(f, size)}, nil } -func NewDirectIOWriter(_ *os.File, _ int) (BufWriter, error) { +func NewDirectIOWriter(*os.File, int) (BufWriter, error) { return nil, errDirectIOUnsupported } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go index 7763d272b7c..8834fd31a60 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go @@ -1730,7 +1730,7 @@ func (h *Head) Close() error { // String returns an human readable representation of the TSDB head. It's important to // keep this function in order to avoid the struct dump when the head is stringified in // errors or logs. 
-func (h *Head) String() string { +func (*Head) String() string { return "head" } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go index 05299f048d9..43e523cae1c 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go @@ -148,7 +148,7 @@ func (a *initAppender) Rollback() error { } // Appender returns a new Appender on the database. -func (h *Head) Appender(_ context.Context) storage.Appender { +func (h *Head) Appender(context.Context) storage.Appender { h.metrics.activeAppenders.Inc() // The head cache might not have a starting point yet. The init appender diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_other.go b/vendor/github.com/prometheus/prometheus/tsdb/head_other.go index 45bb2285f00..7e1eea8b055 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_other.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_other.go @@ -27,6 +27,6 @@ func (s *memSeries) labels() labels.Labels { } // RebuildSymbolTable is a no-op when not using dedupelabels. -func (h *Head) RebuildSymbolTable(_ *slog.Logger) *labels.SymbolTable { +func (*Head) RebuildSymbolTable(*slog.Logger) *labels.SymbolTable { return nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go index b653b5dc14c..26b95880d35 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go @@ -49,7 +49,7 @@ type headIndexReader struct { mint, maxt int64 } -func (h *headIndexReader) Close() error { +func (*headIndexReader) Close() error { return nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go index edcb92a719a..cd598fed655 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go @@ -15,7 +15,6 @@ package index import ( "bufio" - "bytes" "context" "encoding/binary" "fmt" @@ -142,8 +141,7 @@ type Writer struct { lastSymbol string symbolCache map[string]uint32 // From symbol to index in table. - labelIndexes []labelIndexHashEntry // Label index offsets. - labelNames map[string]uint64 // Label names, and their usage. + labelNames map[string]uint64 // Label names, and their usage. // Hold last series to validate that clients insert new series in order. lastSeries labels.Labels @@ -393,9 +391,6 @@ func (w *Writer) ensureStage(s indexWriterStage) error { if err := w.writePostingsToTmpFiles(); err != nil { return err } - if err := w.writeLabelIndices(); err != nil { - return err - } w.toc.Postings = w.f.pos if err := w.writePostings(); err != nil { @@ -403,9 +398,6 @@ func (w *Writer) ensureStage(s indexWriterStage) error { } w.toc.LabelIndicesTable = w.f.pos - if err := w.writeLabelIndexesOffsetTable(); err != nil { - return err - } w.toc.PostingsTable = w.f.pos if err := w.writePostingsOffsetTable(); err != nil { @@ -592,147 +584,6 @@ func (w *Writer) finishSymbols() error { return nil } -func (w *Writer) writeLabelIndices() error { - if err := w.fPO.Flush(); err != nil { - return err - } - - // Find all the label values in the tmp posting offset table. 
- f, err := fileutil.OpenMmapFile(w.fPO.name) - if err != nil { - return err - } - defer f.Close() - - d := encoding.NewDecbufRaw(realByteSlice(f.Bytes()), int(w.fPO.pos)) - cnt := w.cntPO - current := []byte{} - values := []uint32{} - for d.Err() == nil && cnt > 0 { - cnt-- - d.Uvarint() // Keycount. - name := d.UvarintBytes() // Label name. - value := d.UvarintBytes() // Label value. - d.Uvarint64() // Offset. - if len(name) == 0 { - continue // All index is ignored. - } - - if !bytes.Equal(name, current) && len(values) > 0 { - // We've reached a new label name. - if err := w.writeLabelIndex(string(current), values); err != nil { - return err - } - values = values[:0] - } - current = name - sid, ok := w.symbolCache[string(value)] - if !ok { - return fmt.Errorf("symbol entry for %q does not exist", string(value)) - } - values = append(values, sid) - } - if d.Err() != nil { - return d.Err() - } - - // Handle the last label. - if len(values) > 0 { - if err := w.writeLabelIndex(string(current), values); err != nil { - return err - } - } - return nil -} - -func (w *Writer) writeLabelIndex(name string, values []uint32) error { - // Align beginning to 4 bytes for more efficient index list scans. - if err := w.addPadding(4); err != nil { - return err - } - - w.labelIndexes = append(w.labelIndexes, labelIndexHashEntry{ - keys: []string{name}, - offset: w.f.pos, - }) - - startPos := w.f.pos - // Leave 4 bytes of space for the length, which will be calculated later. - if err := w.write([]byte("alen")); err != nil { - return err - } - w.crc32.Reset() - - w.buf1.Reset() - w.buf1.PutBE32int(1) // Number of names. - w.buf1.PutBE32int(len(values)) - w.buf1.WriteToHash(w.crc32) - if err := w.write(w.buf1.Get()); err != nil { - return err - } - - for _, v := range values { - w.buf1.Reset() - w.buf1.PutBE32(v) - w.buf1.WriteToHash(w.crc32) - if err := w.write(w.buf1.Get()); err != nil { - return err - } - } - - // Write out the length. - w.buf1.Reset() - l := w.f.pos - startPos - 4 - if l > math.MaxUint32 { - return fmt.Errorf("label index size exceeds 4 bytes: %d", l) - } - w.buf1.PutBE32int(int(l)) - if err := w.writeAt(w.buf1.Get(), startPos); err != nil { - return err - } - - w.buf1.Reset() - w.buf1.PutHashSum(w.crc32) - return w.write(w.buf1.Get()) -} - -// writeLabelIndexesOffsetTable writes the label indices offset table. -func (w *Writer) writeLabelIndexesOffsetTable() error { - startPos := w.f.pos - // Leave 4 bytes of space for the length, which will be calculated later. - if err := w.write([]byte("alen")); err != nil { - return err - } - w.crc32.Reset() - - w.buf1.Reset() - w.buf1.PutBE32int(len(w.labelIndexes)) - w.buf1.WriteToHash(w.crc32) - if err := w.write(w.buf1.Get()); err != nil { - return err - } - - for _, e := range w.labelIndexes { - w.buf1.Reset() - w.buf1.PutUvarint(len(e.keys)) - for _, k := range e.keys { - w.buf1.PutUvarintStr(k) - } - w.buf1.PutUvarint64(e.offset) - w.buf1.WriteToHash(w.crc32) - if err := w.write(w.buf1.Get()); err != nil { - return err - } - } - - // Write out the length. - err := w.writeLengthAndHash(startPos) - if err != nil { - return fmt.Errorf("label indexes offset table length/crc32 write error: %w", err) - } - return nil -} - // writePostingsOffsetTable writes the postings offset table. func (w *Writer) writePostingsOffsetTable() error { // Ensure everything is in the temporary file. 
@@ -1049,11 +900,6 @@ func (w *Writer) writePostings() error { return nil } -type labelIndexHashEntry struct { - keys []string - offset uint64 -} - func (w *Writer) Close() error { // Even if this fails, we need to close all the files. ensureErr := w.ensureStage(idxStageDone) @@ -1845,7 +1691,7 @@ func (r *Reader) postingsForLabelMatchingV1(ctx context.Context, name string, ma // SortedPostings returns the given postings list reordered so that the backing series // are sorted. -func (r *Reader) SortedPostings(p Postings) Postings { +func (*Reader) SortedPostings(p Postings) Postings { return p } @@ -1920,7 +1766,7 @@ func (s *stringListIter) Next() bool { return true } func (s stringListIter) At() string { return s.cur } -func (s stringListIter) Err() error { return nil } +func (stringListIter) Err() error { return nil } // Decoder provides decoding methods for the v1 and v2 index file format. // @@ -1946,7 +1792,7 @@ func DecodePostingsRaw(d encoding.Decbuf) (int, Postings, error) { // LabelNamesOffsetsFor decodes the offsets of the name symbols for a given series. // They are returned in the same order they're stored, which should be sorted lexicographically. -func (dec *Decoder) LabelNamesOffsetsFor(b []byte) ([]uint32, error) { +func (*Decoder) LabelNamesOffsetsFor(b []byte) ([]uint32, error) { d := encoding.Decbuf{B: b} k := d.Uvarint() diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go index 75e3c2c1487..55954e8a881 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go @@ -564,10 +564,10 @@ type errPostings struct { err error } -func (e errPostings) Next() bool { return false } -func (e errPostings) Seek(storage.SeriesRef) bool { return false } -func (e errPostings) At() storage.SeriesRef { return 0 } -func (e errPostings) Err() error { return e.err } +func (errPostings) Next() bool { return false } +func (errPostings) Seek(storage.SeriesRef) bool { return false } +func (errPostings) At() storage.SeriesRef { return 0 } +func (e errPostings) Err() error { return e.err } var emptyPostings = errPostings{} @@ -607,53 +607,54 @@ func Intersect(its ...Postings) Postings { } type intersectPostings struct { - arr []Postings - cur storage.SeriesRef + postings []Postings // These are the postings we will be intersecting. + current storage.SeriesRef // The current intersection, if Seek() or Next() has returned true. } func newIntersectPostings(its ...Postings) *intersectPostings { - return &intersectPostings{arr: its} + return &intersectPostings{postings: its} } func (it *intersectPostings) At() storage.SeriesRef { - return it.cur + return it.current } -func (it *intersectPostings) doNext() bool { -Loop: +func (it *intersectPostings) Seek(target storage.SeriesRef) bool { for { - for _, p := range it.arr { - if !p.Seek(it.cur) { + allEqual := true + for _, p := range it.postings { + if !p.Seek(target) { return false } - if p.At() > it.cur { - it.cur = p.At() - continue Loop + if p.At() > target { + target = p.At() + allEqual = false } } - return true + + // If all p.At() are equal, we found an intersection.
+ if allEqual { + it.current = target + return true + } } } func (it *intersectPostings) Next() bool { - for _, p := range it.arr { + target := it.current + for _, p := range it.postings { if !p.Next() { return false } - if p.At() > it.cur { - it.cur = p.At() + if p.At() > target { + target = p.At() } } - return it.doNext() -} - -func (it *intersectPostings) Seek(id storage.SeriesRef) bool { - it.cur = id - return it.doNext() + return it.Seek(target) } func (it *intersectPostings) Err() error { - for _, p := range it.arr { + for _, p := range it.postings { if p.Err() != nil { return p.Err() } @@ -861,7 +862,7 @@ func (it *ListPostings) Seek(x storage.SeriesRef) bool { return false } -func (it *ListPostings) Err() error { +func (*ListPostings) Err() error { return nil } @@ -914,7 +915,7 @@ func (it *bigEndianPostings) Seek(x storage.SeriesRef) bool { return false } -func (it *bigEndianPostings) Err() error { +func (*bigEndianPostings) Err() error { return nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go index ddc5376df05..af8f9b1f83f 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go @@ -386,7 +386,7 @@ func (ch *OOOCompactionHead) Chunks() (ChunkReader, error) { return NewHeadAndOOOChunkReader(ch.head, ch.mint, ch.maxt, nil, nil, ch.lastMmapRef), nil } -func (ch *OOOCompactionHead) Tombstones() (tombstones.Reader, error) { +func (*OOOCompactionHead) Tombstones() (tombstones.Reader, error) { return tombstones.NewMemTombstones(), nil } @@ -418,7 +418,7 @@ func (ch *OOOCompactionHead) CloneForTimeRange(mint, maxt int64) *OOOCompactionH } } -func (ch *OOOCompactionHead) Size() int64 { return 0 } +func (*OOOCompactionHead) Size() int64 { return 0 } func (ch *OOOCompactionHead) MinTime() int64 { return ch.mint } func (ch *OOOCompactionHead) MaxTime() int64 { return ch.maxt } func (ch *OOOCompactionHead) ChunkRange() int64 { return ch.chunkRange } @@ -446,15 +446,15 @@ func (ir *OOOCompactionHeadIndexReader) Postings(_ context.Context, name string, return index.NewListPostings(ir.ch.postings), nil } -func (ir *OOOCompactionHeadIndexReader) PostingsForLabelMatching(context.Context, string, func(string) bool) index.Postings { +func (*OOOCompactionHeadIndexReader) PostingsForLabelMatching(context.Context, string, func(string) bool) index.Postings { return index.ErrPostings(errors.New("not supported")) } -func (ir *OOOCompactionHeadIndexReader) PostingsForAllLabelValues(context.Context, string) index.Postings { +func (*OOOCompactionHeadIndexReader) PostingsForAllLabelValues(context.Context, string) index.Postings { return index.ErrPostings(errors.New("not supported")) } -func (ir *OOOCompactionHeadIndexReader) SortedPostings(p index.Postings) index.Postings { +func (*OOOCompactionHeadIndexReader) SortedPostings(p index.Postings) index.Postings { // This will already be sorted from the Postings() call above. 
return p } @@ -484,31 +484,31 @@ func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *l return getOOOSeriesChunks(s, ir.ch.mint, ir.ch.maxt, 0, ir.ch.lastMmapRef, false, 0, chks) } -func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, _ string, _ *storage.LabelHints, _ ...*labels.Matcher) ([]string, error) { +func (*OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, _ string, _ *storage.LabelHints, _ ...*labels.Matcher) ([]string, error) { return nil, errors.New("not implemented") } -func (ir *OOOCompactionHeadIndexReader) LabelValues(_ context.Context, _ string, _ *storage.LabelHints, _ ...*labels.Matcher) ([]string, error) { +func (*OOOCompactionHeadIndexReader) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, error) { return nil, errors.New("not implemented") } -func (ir *OOOCompactionHeadIndexReader) PostingsForMatchers(_ context.Context, _ bool, _ ...*labels.Matcher) (index.Postings, error) { +func (*OOOCompactionHeadIndexReader) PostingsForMatchers(context.Context, bool, ...*labels.Matcher) (index.Postings, error) { return nil, errors.New("not implemented") } -func (ir *OOOCompactionHeadIndexReader) LabelNames(context.Context, ...*labels.Matcher) ([]string, error) { +func (*OOOCompactionHeadIndexReader) LabelNames(context.Context, ...*labels.Matcher) ([]string, error) { return nil, errors.New("not implemented") } -func (ir *OOOCompactionHeadIndexReader) LabelValueFor(context.Context, storage.SeriesRef, string) (string, error) { +func (*OOOCompactionHeadIndexReader) LabelValueFor(context.Context, storage.SeriesRef, string) (string, error) { return "", errors.New("not implemented") } -func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(_ context.Context, _ index.Postings) ([]string, error) { +func (*OOOCompactionHeadIndexReader) LabelNamesFor(context.Context, index.Postings) ([]string, error) { return nil, errors.New("not implemented") } -func (ir *OOOCompactionHeadIndexReader) Close() error { +func (*OOOCompactionHeadIndexReader) Close() error { return nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/prometheus/tsdb/querier.go index 0943c760cd2..788991235f9 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/querier.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/querier.go @@ -588,7 +588,7 @@ func (b *blockBaseSeriesSet) Err() error { return b.p.Err() } -func (b *blockBaseSeriesSet) Warnings() annotations.Annotations { return nil } +func (*blockBaseSeriesSet) Warnings() annotations.Annotations { return nil } // populateWithDelGenericSeriesIterator allows to iterate over given chunk // metas. In each iteration it ensures that chunks are trimmed based on given @@ -1266,4 +1266,4 @@ func (cr nopChunkReader) ChunkOrIterable(chunks.Meta) (chunkenc.Chunk, chunkenc. 
return cr.emptyChunk, nil, nil } -func (cr nopChunkReader) Close() error { return nil } +func (nopChunkReader) Close() error { return nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go index 692976cdf84..76f44c0cd7d 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go @@ -204,13 +204,13 @@ type Decoder struct { builder labels.ScratchBuilder } -func NewDecoder(_ *labels.SymbolTable) Decoder { // FIXME remove t +func NewDecoder(*labels.SymbolTable) Decoder { // FIXME remove t return Decoder{builder: labels.NewScratchBuilder(0)} } // Type returns the type of the record. // Returns RecordUnknown if no valid record type is found. -func (d *Decoder) Type(rec []byte) Type { +func (*Decoder) Type(rec []byte) Type { if len(rec) < 1 { return Unknown } @@ -247,7 +247,7 @@ func (d *Decoder) Series(rec []byte, series []RefSeries) ([]RefSeries, error) { } // Metadata appends metadata in rec to the given slice. -func (d *Decoder) Metadata(rec []byte, metadata []RefMetadata) ([]RefMetadata, error) { +func (*Decoder) Metadata(rec []byte, metadata []RefMetadata) ([]RefMetadata, error) { dec := encoding.Decbuf{B: rec} if Type(dec.Byte()) != Metadata { @@ -302,7 +302,7 @@ func (d *Decoder) DecodeLabels(dec *encoding.Decbuf) labels.Labels { } // Samples appends samples in rec to the given slice. -func (d *Decoder) Samples(rec []byte, samples []RefSample) ([]RefSample, error) { +func (*Decoder) Samples(rec []byte, samples []RefSample) ([]RefSample, error) { dec := encoding.Decbuf{B: rec} if Type(dec.Byte()) != Samples { @@ -341,7 +341,7 @@ func (d *Decoder) Samples(rec []byte, samples []RefSample) ([]RefSample, error) } // Tombstones appends tombstones in rec to the given slice. 
-func (d *Decoder) Tombstones(rec []byte, tstones []tombstones.Stone) ([]tombstones.Stone, error) { +func (*Decoder) Tombstones(rec []byte, tstones []tombstones.Stone) ([]tombstones.Stone, error) { dec := encoding.Decbuf{B: rec} if Type(dec.Byte()) != Tombstones { @@ -405,7 +405,7 @@ func (d *Decoder) ExemplarsFromBuffer(dec *encoding.Decbuf, exemplars []RefExemp return exemplars, nil } -func (d *Decoder) MmapMarkers(rec []byte, markers []RefMmapMarker) ([]RefMmapMarker, error) { +func (*Decoder) MmapMarkers(rec []byte, markers []RefMmapMarker) ([]RefMmapMarker, error) { dec := encoding.Decbuf{B: rec} t := Type(dec.Byte()) if t != MmapMarkers { @@ -433,7 +433,7 @@ func (d *Decoder) MmapMarkers(rec []byte, markers []RefMmapMarker) ([]RefMmapMar return markers, nil } -func (d *Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample) ([]RefHistogramSample, error) { +func (*Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample) ([]RefHistogramSample, error) { dec := encoding.Decbuf{B: rec} t := Type(dec.Byte()) if t != HistogramSamples && t != CustomBucketsHistogramSamples { @@ -525,7 +525,7 @@ func DecodeHistogram(buf *encoding.Decbuf, h *histogram.Histogram) { } } -func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogramSample) ([]RefFloatHistogramSample, error) { +func (*Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogramSample) ([]RefFloatHistogramSample, error) { dec := encoding.Decbuf{B: rec} t := Type(dec.Byte()) if t != FloatHistogramSamples && t != CustomBucketsFloatHistogramSamples { @@ -622,7 +622,7 @@ func DecodeFloatHistogram(buf *encoding.Decbuf, fh *histogram.FloatHistogram) { type Encoder struct{} // Series appends the encoded series to b and returns the resulting slice. -func (e *Encoder) Series(series []RefSeries, b []byte) []byte { +func (*Encoder) Series(series []RefSeries, b []byte) []byte { buf := encoding.Encbuf{B: b} buf.PutByte(byte(Series)) @@ -634,7 +634,7 @@ func (e *Encoder) Series(series []RefSeries, b []byte) []byte { } // Metadata appends the encoded metadata to b and returns the resulting slice. -func (e *Encoder) Metadata(metadata []RefMetadata, b []byte) []byte { +func (*Encoder) Metadata(metadata []RefMetadata, b []byte) []byte { buf := encoding.Encbuf{B: b} buf.PutByte(byte(Metadata)) @@ -665,7 +665,7 @@ func EncodeLabels(buf *encoding.Encbuf, lbls labels.Labels) { } // Samples appends the encoded samples to b and returns the resulting slice. -func (e *Encoder) Samples(samples []RefSample, b []byte) []byte { +func (*Encoder) Samples(samples []RefSample, b []byte) []byte { buf := encoding.Encbuf{B: b} buf.PutByte(byte(Samples)) @@ -689,7 +689,7 @@ func (e *Encoder) Samples(samples []RefSample, b []byte) []byte { } // Tombstones appends the encoded tombstones to b and returns the resulting slice. -func (e *Encoder) Tombstones(tstones []tombstones.Stone, b []byte) []byte { +func (*Encoder) Tombstones(tstones []tombstones.Stone, b []byte) []byte { buf := encoding.Encbuf{B: b} buf.PutByte(byte(Tombstones)) @@ -716,7 +716,7 @@ func (e *Encoder) Exemplars(exemplars []RefExemplar, b []byte) []byte { return buf.Get() } -func (e *Encoder) EncodeExemplarsIntoBuffer(exemplars []RefExemplar, buf *encoding.Encbuf) { +func (*Encoder) EncodeExemplarsIntoBuffer(exemplars []RefExemplar, buf *encoding.Encbuf) { // Store base timestamp and base reference number of first sample. // All samples encode their timestamp and ref as delta to those. 
first := exemplars[0] @@ -732,7 +732,7 @@ func (e *Encoder) EncodeExemplarsIntoBuffer(exemplars []RefExemplar, buf *encodi } } -func (e *Encoder) MmapMarkers(markers []RefMmapMarker, b []byte) []byte { +func (*Encoder) MmapMarkers(markers []RefMmapMarker, b []byte) []byte { buf := encoding.Encbuf{B: b} buf.PutByte(byte(MmapMarkers)) @@ -744,7 +744,7 @@ func (e *Encoder) MmapMarkers(markers []RefMmapMarker, b []byte) []byte { return buf.Get() } -func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) ([]byte, []RefHistogramSample) { +func (*Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) ([]byte, []RefHistogramSample) { buf := encoding.Encbuf{B: b} buf.PutByte(byte(HistogramSamples)) @@ -778,7 +778,7 @@ func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) ([ return buf.Get(), customBucketHistograms } -func (e *Encoder) CustomBucketsHistogramSamples(histograms []RefHistogramSample, b []byte) []byte { +func (*Encoder) CustomBucketsHistogramSamples(histograms []RefHistogramSample, b []byte) []byte { buf := encoding.Encbuf{B: b} buf.PutByte(byte(CustomBucketsHistogramSamples)) @@ -843,7 +843,7 @@ func EncodeHistogram(buf *encoding.Encbuf, h *histogram.Histogram) { } } -func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) ([]byte, []RefFloatHistogramSample) { +func (*Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) ([]byte, []RefFloatHistogramSample) { buf := encoding.Encbuf{B: b} buf.PutByte(byte(FloatHistogramSamples)) @@ -878,7 +878,7 @@ func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b return buf.Get(), customBucketsFloatHistograms } -func (e *Encoder) CustomBucketsFloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) []byte { +func (*Encoder) CustomBucketsFloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) []byte { buf := encoding.Encbuf{B: b} buf.PutByte(byte(CustomBucketsFloatHistogramSamples)) diff --git a/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go index 5da360b69ab..f8070ff3431 100644 --- a/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go +++ b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go @@ -147,7 +147,7 @@ var ( IncompatibleBucketLayoutInBinOpWarning = fmt.Errorf("%w: incompatible bucket layout encountered for binary operator", PromQLWarning) PossibleNonCounterInfo = fmt.Errorf("%w: metric might not be a counter, name does not end in _total/_sum/_count/_bucket:", PromQLInfo) - PossibleNonCounterLabelInfo = fmt.Errorf("%w: metric might not be a counter, __type__ label is not set to %q", PromQLInfo, model.MetricTypeCounter) + PossibleNonCounterLabelInfo = fmt.Errorf("%w: metric might not be a counter, __type__ label is not set to %q or %q", PromQLInfo, model.MetricTypeCounter, model.MetricTypeHistogram) HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name", PromQLInfo) IncompatibleTypesInBinOpInfo = fmt.Errorf("%w: incompatible sample types encountered for binary operator", PromQLInfo) HistogramIgnoredInAggregationInfo = fmt.Errorf("%w: ignored histogram in", PromQLInfo) diff --git a/vendor/github.com/prometheus/prometheus/util/compression/buffers.go 
b/vendor/github.com/prometheus/prometheus/util/compression/buffers.go index 765bc64c0ba..f510efc042f 100644 --- a/vendor/github.com/prometheus/prometheus/util/compression/buffers.go +++ b/vendor/github.com/prometheus/prometheus/util/compression/buffers.go @@ -75,11 +75,11 @@ func (b *concurrentEBuffer) zstdEncBuf() *zstd.Encoder { // TODO(bwplotka): We could use pool, but putting it back into the pool needs to be // on the caller side, so no pool for now. -func (b *concurrentEBuffer) get() []byte { +func (*concurrentEBuffer) get() []byte { return nil } -func (b *concurrentEBuffer) set([]byte) {} +func (*concurrentEBuffer) set([]byte) {} type DecodeBuffer interface { zstdDecBuf() *zstd.Decoder @@ -135,8 +135,8 @@ func (b *concurrentDBuffer) zstdDecBuf() *zstd.Decoder { return b.r } -func (b *concurrentDBuffer) get() []byte { +func (*concurrentDBuffer) get() []byte { return nil } -func (b *concurrentDBuffer) set([]byte) {} +func (*concurrentDBuffer) set([]byte) {} diff --git a/vendor/github.com/prometheus/prometheus/util/stats/query_stats.go b/vendor/github.com/prometheus/prometheus/util/stats/query_stats.go index 0e0e1871c33..f0e9b90a62b 100644 --- a/vendor/github.com/prometheus/prometheus/util/stats/query_stats.go +++ b/vendor/github.com/prometheus/prometheus/util/stats/query_stats.go @@ -323,7 +323,7 @@ func NewQuerySamples(enablePerStepStats bool) *QuerySamples { return &qs } -func (qs *QuerySamples) NewChild() *QuerySamples { +func (*QuerySamples) NewChild() *QuerySamples { return NewQuerySamples(false) } diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/context.go b/vendor/github.com/prometheus/prometheus/util/testutil/context.go index ea4b0e3746b..ca137ceeb65 100644 --- a/vendor/github.com/prometheus/prometheus/util/testutil/context.go +++ b/vendor/github.com/prometheus/prometheus/util/testutil/context.go @@ -27,7 +27,7 @@ type MockContext struct { } // Deadline always will return not set. -func (c *MockContext) Deadline() (deadline time.Time, ok bool) { +func (*MockContext) Deadline() (deadline time.Time, ok bool) { return time.Time{}, false } @@ -42,7 +42,7 @@ func (c *MockContext) Err() error { } // Value ignores the Value and always returns nil. 
-func (c *MockContext) Value(interface{}) interface{} { +func (*MockContext) Value(interface{}) interface{} { return nil } diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/directory.go b/vendor/github.com/prometheus/prometheus/util/testutil/directory.go index 38dabd1830d..2f2af69cd3a 100644 --- a/vendor/github.com/prometheus/prometheus/util/testutil/directory.go +++ b/vendor/github.com/prometheus/prometheus/util/testutil/directory.go @@ -77,7 +77,7 @@ type ( } ) -func (c nilCloser) Close() { +func (nilCloser) Close() { } func (c callbackCloser) Close() { diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go index 5002fad27e4..b5cc22b7124 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go @@ -186,6 +186,7 @@ type TSDBAdminStats interface { Snapshot(dir string, withHead bool) error Stats(statsByLabelName string, limit int) (*tsdb.Stats, error) WALReplayStatus() (tsdb.WALReplayStatus, error) + BlockMetas() ([]tsdb.BlockMeta, error) } type QueryOpts interface { @@ -265,6 +266,7 @@ func NewAPI( otlpEnabled, otlpDeltaToCumulative, otlpNativeDeltaIngestion bool, ctZeroIngestionEnabled bool, lookbackDelta time.Duration, + enableTypeAndUnitLabels bool, ) *API { a := &API{ QueryEngine: qe, @@ -312,9 +314,10 @@ func NewAPI( } if otlpEnabled { a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, registerer, ap, configFunc, remote.OTLPOptions{ - ConvertDelta: otlpDeltaToCumulative, - NativeDelta: otlpNativeDeltaIngestion, - LookbackDelta: lookbackDelta, + ConvertDelta: otlpDeltaToCumulative, + NativeDelta: otlpNativeDeltaIngestion, + LookbackDelta: lookbackDelta, + EnableTypeAndUnitLabels: enableTypeAndUnitLabels, }) } @@ -409,6 +412,7 @@ func (api *API) Register(r *route.Router) { r.Get("/status/buildinfo", wrap(api.serveBuildInfo)) r.Get("/status/flags", wrap(api.serveFlags)) r.Get("/status/tsdb", wrapAgent(api.serveTSDBStatus)) + r.Get("/status/tsdb/blocks", wrapAgent(api.serveTSDBBlocks)) r.Get("/status/walreplay", api.serveWALReplayStatus) r.Get("/notifications", api.notifications) r.Get("/notifications/live", api.notificationsSSE) @@ -441,7 +445,7 @@ func invalidParamError(err error, parameter string) apiFuncResult { }, nil, nil} } -func (api *API) options(*http.Request) apiFuncResult { +func (*API) options(*http.Request) apiFuncResult { return apiFuncResult{nil, nil, nil, nil} } @@ -514,7 +518,7 @@ func (api *API) query(r *http.Request) (result apiFuncResult) { }, nil, warnings, qry.Close} } -func (api *API) formatQuery(r *http.Request) (result apiFuncResult) { +func (*API) formatQuery(r *http.Request) (result apiFuncResult) { expr, err := parser.ParseExpr(r.FormValue("query")) if err != nil { return invalidParamError(err, "query") @@ -523,7 +527,7 @@ func (api *API) formatQuery(r *http.Request) (result apiFuncResult) { return apiFuncResult{expr.Pretty(0), nil, nil, nil} } -func (api *API) parseQuery(r *http.Request) apiFuncResult { +func (*API) parseQuery(r *http.Request) apiFuncResult { expr, err := parser.ParseExpr(r.FormValue("query")) if err != nil { return invalidParamError(err, "query") @@ -994,7 +998,7 @@ func (api *API) series(r *http.Request) (result apiFuncResult) { return apiFuncResult{metrics, nil, warnings, closer} } -func (api *API) dropSeries(_ *http.Request) apiFuncResult { +func (*API) dropSeries(*http.Request) apiFuncResult { return apiFuncResult{nil, &apiError{errorInternal, errors.New("not 
implemented")}, nil, nil} } @@ -1688,7 +1692,7 @@ type prometheusConfig struct { YAML string `json:"yaml"` } -func (api *API) serveRuntimeInfo(_ *http.Request) apiFuncResult { +func (api *API) serveRuntimeInfo(*http.Request) apiFuncResult { status, err := api.runtimeInfo() if err != nil { return apiFuncResult{status, &apiError{errorInternal, err}, nil, nil} @@ -1696,18 +1700,18 @@ func (api *API) serveRuntimeInfo(_ *http.Request) apiFuncResult { return apiFuncResult{status, nil, nil, nil} } -func (api *API) serveBuildInfo(_ *http.Request) apiFuncResult { +func (api *API) serveBuildInfo(*http.Request) apiFuncResult { return apiFuncResult{api.buildInfo, nil, nil, nil} } -func (api *API) serveConfig(_ *http.Request) apiFuncResult { +func (api *API) serveConfig(*http.Request) apiFuncResult { cfg := &prometheusConfig{ YAML: api.config().String(), } return apiFuncResult{cfg, nil, nil, nil} } -func (api *API) serveFlags(_ *http.Request) apiFuncResult { +func (api *API) serveFlags(*http.Request) apiFuncResult { return apiFuncResult{api.flagsMap, nil, nil, nil} } @@ -1745,6 +1749,19 @@ func TSDBStatsFromIndexStats(stats []index.Stat) []TSDBStat { return result } +func (api *API) serveTSDBBlocks(*http.Request) apiFuncResult { + blockMetas, err := api.db.BlockMetas() + if err != nil { + return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("error getting block metadata: %w", err)}, nil, nil} + } + + return apiFuncResult{ + data: map[string][]tsdb.BlockMeta{ + "blocks": blockMetas, + }, + } +} + func (api *API) serveTSDBStatus(r *http.Request) apiFuncResult { limit := 10 if s := r.FormValue("limit"); s != "" { diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/json_codec.go b/vendor/github.com/prometheus/prometheus/web/api/v1/json_codec.go index 6bd095a8f3d..3aa66adfd3e 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/json_codec.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/json_codec.go @@ -38,15 +38,15 @@ func init() { // JSONCodec is a Codec that encodes API responses as JSON. 
type JSONCodec struct{} -func (j JSONCodec) ContentType() MIMEType { +func (JSONCodec) ContentType() MIMEType { return MIMEType{Type: "application", SubType: "json"} } -func (j JSONCodec) CanEncode(_ *Response) bool { +func (JSONCodec) CanEncode(*Response) bool { return true } -func (j JSONCodec) Encode(resp *Response) ([]byte, error) { +func (JSONCodec) Encode(resp *Response) ([]byte, error) { json := jsoniter.ConfigCompatibleWithStandardLibrary return json.Marshal(resp) } diff --git a/vendor/modules.txt b/vendor/modules.txt index c44481ae754..d42a0ff386c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -211,7 +211,7 @@ github.com/aws/aws-sdk-go/service/sso/ssoiface github.com/aws/aws-sdk-go/service/ssooidc github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface -# github.com/aws/aws-sdk-go-v2 v1.36.3 +# github.com/aws/aws-sdk-go-v2 v1.37.0 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/aws github.com/aws/aws-sdk-go-v2/aws/defaults @@ -253,19 +253,19 @@ github.com/aws/aws-sdk-go-v2/credentials/stscreds ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/feature/ec2/imds github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config -# github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 +# github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/internal/configsources -# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 +# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 # github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/internal/ini -# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 +# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding -# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 +# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url # github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 @@ -283,7 +283,7 @@ github.com/aws/aws-sdk-go-v2/service/ssooidc/types github.com/aws/aws-sdk-go-v2/service/sts github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sts/types -# github.com/aws/smithy-go v1.22.3 +# github.com/aws/smithy-go v1.22.5 ## explicit; go 1.22 github.com/aws/smithy-go github.com/aws/smithy-go/auth @@ -1060,7 +1060,7 @@ github.com/prometheus/common/version # github.com/prometheus/exporter-toolkit v0.14.0 ## explicit; go 1.22 github.com/prometheus/exporter-toolkit/web -# github.com/prometheus/otlptranslator v0.0.0-20250620074007-94f535e0c588 +# github.com/prometheus/otlptranslator v0.0.0-20250731173911-a9673827589a ## explicit; go 1.23.0 github.com/prometheus/otlptranslator # github.com/prometheus/procfs v0.16.1 @@ -1068,7 +1068,7 @@ github.com/prometheus/otlptranslator github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v0.305.1-0.20250721065454-b09cf6be8d56 +# github.com/prometheus/prometheus v0.305.1-0.20250808023455-1e4144a496fb ## explicit; go 1.23.0 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery