diff --git a/go.mod b/go.mod
index 7bd59cbd51..7b2fb3e8f4 100644
--- a/go.mod
+++ b/go.mod
@@ -6,17 +6,20 @@ require (
 	github.com/andybalholm/brotli v1.1.1
 	github.com/atombender/go-jsonschema v0.16.1-0.20240916205339-a74cd4e2851c
 	github.com/bytecodealliance/wasmtime-go/v23 v23.0.0
+	github.com/cespare/xxhash/v2 v2.3.0
 	github.com/confluentinc/confluent-kafka-go/v2 v2.3.0
 	github.com/dominikbraun/graph v0.23.0
 	github.com/fxamacker/cbor/v2 v2.5.0
 	github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0
+	github.com/go-kit/log v0.2.1
 	github.com/go-playground/validator/v10 v10.4.1
-	github.com/go-viper/mapstructure/v2 v2.1.0
+	github.com/go-viper/mapstructure/v2 v2.2.1
+	github.com/gogo/protobuf v1.3.2
 	github.com/google/go-cmp v0.6.0
 	github.com/google/uuid v1.6.0
 	github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1
-	github.com/hashicorp/consul/sdk v0.16.0
-	github.com/hashicorp/go-hclog v1.5.0
+	github.com/hashicorp/consul/sdk v0.16.1
+	github.com/hashicorp/go-hclog v1.6.3
 	github.com/hashicorp/go-plugin v1.6.2
 	github.com/iancoleman/strcase v0.3.0
 	github.com/invopop/jsonschema v0.12.0
@@ -24,68 +27,151 @@ require (
 	github.com/jmoiron/sqlx v1.4.0
 	github.com/jonboulle/clockwork v0.4.0
 	github.com/jpillora/backoff v1.0.0
+	github.com/klauspost/compress v1.17.11
 	github.com/lib/pq v1.10.9
 	github.com/linkedin/goavro/v2 v2.12.0
 	github.com/marcboeker/go-duckdb v1.8.3
 	github.com/pelletier/go-toml/v2 v2.2.0
-	github.com/prometheus/client_golang v1.17.0
+	github.com/prometheus/client_golang v1.20.5
+	github.com/prometheus/client_model v0.6.1
+	github.com/prometheus/common v0.60.1
+	github.com/prometheus/prometheus v0.54.1
 	github.com/riferrei/srclient v0.5.4
 	github.com/santhosh-tekuri/jsonschema/v5 v5.2.0
 	github.com/scylladb/go-reflectx v1.0.1
 	github.com/shopspring/decimal v1.4.0
 	github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7
 	github.com/smartcontractkit/libocr v0.0.0-20241007185508-adbe57025f12
-	github.com/stretchr/testify v1.9.0
-	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0
-	go.opentelemetry.io/otel v1.30.0
+	github.com/stretchr/testify v1.10.0
+	go.opentelemetry.io/collector/component v0.115.0
+	go.opentelemetry.io/collector/component/componenttest v0.115.0
+	go.opentelemetry.io/collector/config/configauth v0.115.0
+	go.opentelemetry.io/collector/config/configcompression v1.21.0
+	go.opentelemetry.io/collector/config/configgrpc v0.115.0
+	go.opentelemetry.io/collector/config/configopaque v1.21.0
+	go.opentelemetry.io/collector/config/configretry v1.21.0
+	go.opentelemetry.io/collector/config/configtelemetry v0.115.0
+	go.opentelemetry.io/collector/config/configtls v1.21.0
+	go.opentelemetry.io/collector/confmap v1.21.0
+	go.opentelemetry.io/collector/consumer v1.21.0
+	go.opentelemetry.io/collector/consumer/consumertest v0.115.0
+	go.opentelemetry.io/collector/exporter v0.115.0
+	go.opentelemetry.io/collector/exporter/exportertest v0.115.0
+	go.opentelemetry.io/collector/exporter/otlpexporter v0.115.0
+	go.opentelemetry.io/collector/pdata v1.21.0
+	go.opentelemetry.io/collector/receiver v0.115.0
+	go.opentelemetry.io/collector/receiver/receivertest v0.115.0
+	go.opentelemetry.io/collector/semconv v0.115.0
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0
+	go.opentelemetry.io/otel v1.32.0
 	go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.0.0-20240823153156-2a54df7bffb9
-	go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0
-	go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.4.0
-	go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.28.0
-	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0
-	go.opentelemetry.io/otel/log v0.6.0
-	go.opentelemetry.io/otel/metric v1.30.0
-	go.opentelemetry.io/otel/sdk v1.30.0
-	go.opentelemetry.io/otel/sdk/log v0.6.0
-	go.opentelemetry.io/otel/sdk/metric v1.30.0
-	go.opentelemetry.io/otel/trace v1.30.0
+	go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0
+	go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0
+	go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0
+	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0
+	go.opentelemetry.io/otel/log v0.8.0
+	go.opentelemetry.io/otel/metric v1.32.0
+	go.opentelemetry.io/otel/sdk v1.32.0
+	go.opentelemetry.io/otel/sdk/log v0.7.0
+	go.opentelemetry.io/otel/sdk/metric v1.32.0
+	go.opentelemetry.io/otel/trace v1.32.0
 	go.uber.org/goleak v1.3.0
 	go.uber.org/multierr v1.11.0
 	go.uber.org/zap v1.27.0
-	golang.org/x/crypto v0.28.0
+	golang.org/x/crypto v0.29.0
 	golang.org/x/exp v0.0.0-20240909161429-701f63a606c0
 	golang.org/x/tools v0.26.0
 	gonum.org/v1/gonum v0.15.1
 	google.golang.org/grpc v1.67.1
-	google.golang.org/protobuf v1.35.1
+	google.golang.org/protobuf v1.35.2
+	gopkg.in/yaml.v2 v2.4.0
+	gopkg.in/yaml.v3 v3.0.1
 	sigs.k8s.io/yaml v1.4.0
 )
 
 require (
+	cloud.google.com/go/auth v0.7.0 // indirect
+	cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
+	cloud.google.com/go/compute/metadata v0.5.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 // indirect
+	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
+	github.com/Code-Hex/go-generics-cache v1.5.1 // indirect
+	github.com/Microsoft/go-winio v0.6.1 // indirect
+	github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect
 	github.com/apache/arrow-go/v18 v18.0.0 // indirect
+	github.com/armon/go-metrics v0.4.1 // indirect
+	github.com/aws/aws-sdk-go v1.54.19 // indirect
 	github.com/bahlo/generic-list-go v0.2.0 // indirect
+	github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/buger/jsonparser v1.1.1 // indirect
 	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
-	github.com/cespare/xxhash/v2 v2.3.0 // indirect
+	github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+	github.com/dennwc/varint v1.0.0 // indirect
+	github.com/digitalocean/godo v1.118.0 // indirect
+	github.com/distribution/reference v0.5.0 // indirect
+	github.com/docker/docker v27.3.1+incompatible // indirect
+	github.com/docker/go-connections v0.4.0 // indirect
+	github.com/docker/go-units v0.5.0 // indirect
+	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
+	github.com/envoyproxy/go-control-plane v0.13.0 // indirect
+	github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect
 	github.com/fatih/color v1.17.0 // indirect
+	github.com/felixge/httpsnoop v1.0.4 // indirect
+	github.com/fsnotify/fsnotify v1.8.0 // indirect
+	github.com/go-logfmt/logfmt v0.6.0 // indirect
 	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/go-openapi/jsonpointer v0.20.2 // indirect
+	github.com/go-openapi/jsonreference v0.20.4 // indirect
+	github.com/go-openapi/swag v0.22.9 // indirect
 	github.com/go-playground/locales v0.13.0 // indirect
 	github.com/go-playground/universal-translator v0.17.0 // indirect
+	github.com/go-resty/resty/v2 v2.13.1 // indirect
+	github.com/go-zookeeper/zk v1.0.3 // indirect
 	github.com/goccy/go-json v0.10.3 // indirect
 	github.com/goccy/go-yaml v1.12.0 // indirect
+	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
+	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
 	github.com/google/flatbuffers v24.3.25+incompatible // indirect
+	github.com/google/gnostic-models v0.6.8 // indirect
+	github.com/google/go-querystring v1.1.0 // indirect
+	github.com/google/gofuzz v1.2.0 // indirect
+	github.com/google/s2a-go v0.1.7 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
+	github.com/googleapis/gax-go/v2 v2.12.5 // indirect
+	github.com/gophercloud/gophercloud v1.13.0 // indirect
+	github.com/gorilla/websocket v1.5.0 // indirect
+	github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
 	github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect
+	github.com/hashicorp/consul/api v1.29.2 // indirect
+	github.com/hashicorp/cronexpr v1.1.2 // indirect
+	github.com/hashicorp/errwrap v1.1.0 // indirect
+	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
+	github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
+	github.com/hashicorp/go-multierror v1.1.1 // indirect
+	github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
+	github.com/hashicorp/go-rootcerts v1.0.2 // indirect
+	github.com/hashicorp/go-version v1.7.0 // indirect
+	github.com/hashicorp/golang-lru v1.0.2 // indirect
+	github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 // indirect
+	github.com/hashicorp/serf v0.10.1 // indirect
 	github.com/hashicorp/yamux v0.1.1 // indirect
+	github.com/hetznercloud/hcloud-go/v2 v2.10.2 // indirect
+	github.com/imdario/mergo v0.3.16 // indirect
+	github.com/ionos-cloud/sdk-go/v6 v6.1.11 // indirect
 	github.com/jackc/chunkreader/v2 v2.0.1 // indirect
 	github.com/jackc/pgconn v1.14.3 // indirect
 	github.com/jackc/pgio v1.0.0 // indirect
@@ -93,37 +179,98 @@ require (
 	github.com/jackc/pgproto3/v2 v2.3.3 // indirect
 	github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
 	github.com/jackc/pgtype v1.14.0 // indirect
-	github.com/klauspost/compress v1.17.11 // indirect
+	github.com/jmespath/go-jmespath v0.4.0 // indirect
+	github.com/josharian/intern v1.0.0 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.8 // indirect
+	github.com/knadh/koanf/maps v0.1.1 // indirect
+	github.com/knadh/koanf/providers/confmap v0.1.0 // indirect
+	github.com/knadh/koanf/v2 v2.1.2 // indirect
+	github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b // indirect
+	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/leodido/go-urn v1.2.0 // indirect
+	github.com/linode/linodego v1.37.0 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
+	github.com/miekg/dns v1.1.61 // indirect
+	github.com/mitchellh/copystructure v1.2.0 // indirect
+	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
-	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect
+	github.com/mitchellh/reflectwalk v1.0.2 // indirect
+	github.com/moby/docker-image-spec v1.3.1 // indirect
+	github.com/moby/patternmatcher v0.6.0 // indirect
+	github.com/moby/sys/sequential v0.6.0 // indirect
+	github.com/moby/sys/user v0.3.0 // indirect
+	github.com/moby/sys/userns v0.1.0 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/mostynb/go-grpc-compression v1.2.3 // indirect
 	github.com/mr-tron/base58 v1.2.0 // indirect
-	github.com/oklog/run v1.0.0 // indirect
+	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
+	github.com/oklog/run v1.1.0 // indirect
+	github.com/oklog/ulid v1.3.1 // indirect
+	github.com/opencontainers/go-digest v1.0.0 // indirect
+	github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
+	github.com/ovh/go-ovh v1.6.0 // indirect
 	github.com/pierrec/lz4/v4 v4.1.21 // indirect
+	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
 	github.com/pkg/errors v0.9.1 // indirect
+	github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect
-	github.com/prometheus/common v0.44.0 // indirect
-	github.com/prometheus/procfs v0.11.1 // indirect
+	github.com/prometheus/common/sigv4 v0.1.0 // indirect
+	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/sanity-io/litter v1.5.5 // indirect
+	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/stretchr/objx v0.5.2 // indirect
+	github.com/vultr/govultr/v2 v2.17.2 // indirect
 	github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
 	github.com/zeebo/xxh3 v1.0.2 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 // indirect
+	go.opencensus.io v0.24.0 // indirect
+	go.opentelemetry.io/collector/client v1.21.0 // indirect
+	go.opentelemetry.io/collector/config/confignet v1.21.0 // indirect
+	go.opentelemetry.io/collector/config/internal v0.115.0 // indirect
+	go.opentelemetry.io/collector/consumer/consumererror v0.115.0 // indirect
+	go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.115.0 // indirect
+	go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 // indirect
+	go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.115.0 // indirect
+	go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0 // indirect
+	go.opentelemetry.io/collector/extension v0.115.0 // indirect
+	go.opentelemetry.io/collector/extension/auth v0.115.0 // indirect
+	go.opentelemetry.io/collector/extension/experimental/storage v0.115.0 // indirect
+	go.opentelemetry.io/collector/featuregate v1.21.0 // indirect
+	go.opentelemetry.io/collector/pdata/pprofile v0.115.0 // indirect
+	go.opentelemetry.io/collector/pipeline v0.115.0 // indirect
+	go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.115.0 // indirect
+	go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
+	go.uber.org/atomic v1.11.0 // indirect
 	golang.org/x/mod v0.21.0 // indirect
-	golang.org/x/net v0.30.0 // indirect
-	golang.org/x/sync v0.8.0 // indirect
-	golang.org/x/sys v0.26.0 // indirect
-	golang.org/x/text v0.19.0 // indirect
+	golang.org/x/net v0.31.0 // indirect
+	golang.org/x/oauth2 v0.23.0 // indirect
+	golang.org/x/sync v0.9.0 // indirect
+	golang.org/x/sys v0.27.0 // indirect
+	golang.org/x/term v0.26.0 // indirect
+	golang.org/x/text v0.20.0 // indirect
+	golang.org/x/time v0.5.0 // indirect
 	golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
-	gopkg.in/yaml.v3 v3.0.1 // indirect
+	google.golang.org/api v0.188.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect
+	gopkg.in/inf.v0 v0.9.1 // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
+	k8s.io/api v0.29.3 // indirect
+	k8s.io/apimachinery v0.29.3 // indirect
+	k8s.io/client-go v0.29.3 // indirect
+	k8s.io/klog/v2 v2.130.1 // indirect
+	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
+	k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
+	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
 )
diff --git a/go.sum b/go.sum
index 623f00962f..2b4e3b631b 100644
--- a/go.sum
+++ b/go.sum
@@ -1,27 +1,104 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go/auth v0.7.0 h1:kf/x9B3WTbBUHkC+1VS8wwwli9TzhSt0vSTVBmMR8Ts=
+cloud.google.com/go/auth v0.7.0/go.mod h1:D+WqdrpcjmiCgWrXmLLxOVq1GACoE36chW6KXoEvuIw=
+cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
+cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
+cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
 filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw=
 github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
 github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
+github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
+github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
 github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
 github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
 github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
 github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
-github.com/Microsoft/hcsshim v0.9.4 h1:mnUj0ivWy6UzbB1uLFqKR6F+ZyiDc7j4iGgHTpO+5+I=
-github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg=
+github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
 github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
 github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
 github.com/apache/arrow-go/v18 v18.0.0 h1:1dBDaSbH3LtulTyOVYaBCHO3yVRwjV+TZaqn3g6V7ZM=
 github.com/apache/arrow-go/v18 v18.0.0/go.mod h1:t6+cWRSmKgdQ6HsxisQjok+jBpKGhRDiqcf3p0p/F+A=
 github.com/apache/thrift v0.21.0 h1:tdPmh/ptjE1IJnhbhrcl2++TauVjy242rkV/UzJChnE=
 github.com/apache/thrift v0.21.0/go.mod h1:W1H8aR/QRtYNvrPeFXBtobyRkd0/YVhTc6i07XIAgDw=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
+github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/atombender/go-jsonschema v0.16.1-0.20240916205339-a74cd4e2851c h1:cxQVoh6kY+c4b0HUchHjGWBI8288VhH50qxKG3hdEg0=
 github.com/atombender/go-jsonschema v0.16.1-0.20240916205339-a74cd4e2851c/go.mod h1:3XzxudkrYVUvbduN/uI2fl4lSrMSzU0+3RCu2mpnfx8=
+github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI=
+github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
 github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
 github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
+github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
+github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA=
 github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8=
 github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
@@ -31,18 +108,27 @@ github.com/bytecodealliance/wasmtime-go/v23 v23.0.0/go.mod h1:5YIL+Ouiww2zpO7u+i
 github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg=
+github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
 github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
 github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
 github.com/confluentinc/confluent-kafka-go/v2 v2.3.0 h1:icCHutJouWlQREayFwCc7lxDAhws08td+W3/gdqgZts=
 github.com/confluentinc/confluent-kafka-go/v2 v2.3.0/go.mod h1:/VTy8iEpe6mD9pkCH5BhijlUl8ulUXymKv1Qig5Rgb8=
-github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
-github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
 github.com/containerd/containerd v1.6.8 h1:h4dOFDwzHmqFEP754PgfgTeVXFnLiRc6kiqC7tplDJs=
 github.com/containerd/containerd v1.6.8/go.mod h1:By6p5KqPK0/7/CgO/A6t/Gz+CUYUu2zf1hUaaymVXB0=
+github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
+github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
@@ -51,34 +137,72 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
-github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
-github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
+github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
+github.com/digitalocean/godo v1.118.0 h1:lkzGFQmACrVCp7UqH1sAi4JK/PWwlc5aaxubgorKmC4=
+github.com/digitalocean/godo v1.118.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
+github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
+github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
+github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
+github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
+github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/dominikbraun/graph v0.23.0 h1:TdZB4pPqCLFxYhdyMFb1TBdFxp8XLcJfTTBQucVPgCo=
 github.com/dominikbraun/graph v0.23.0/go.mod h1:yOjYyogZLY1LSG9E33JWZJiq5k83Qy2C6POAuiViluc=
+github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
+github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les=
+github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM=
+github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
+github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
+github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
 github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=
 github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
+github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
 github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE=
 github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg=
 github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
+github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
+github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
 github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q=
+github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs=
+github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU=
+github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
+github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE=
+github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
 github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
 github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
 github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
@@ -87,25 +211,47 @@ github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD87
 github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
 github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE=
 github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
+github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g=
+github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0=
 github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
 github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w=
-github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
+github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
+github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
 github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
 github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
 github.com/goccy/go-yaml v1.12.0 h1:/1WHjnMsI1dlIBQutrvSMGZRQufVO3asrHfTwfACoPM=
 github.com/goccy/go-yaml v1.12.0/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU=
 github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
 github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
+github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
 github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
 github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
 github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
@@ -113,6 +259,7 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
 github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
 github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
 github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
@@ -121,39 +268,136 @@ github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6
 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
 github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
 github.com/google/flatbuffers v24.3.25+incompatible h1:CX395cjN9Kke9mmalRoL3d81AtFUxJM+yDthflgJGkI=
 github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
+github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da h1:xRmpO92tb8y+Z85iUOMOicpCfaYcv7o3Cg3wKrIpg8g=
+github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
+github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA=
+github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E=
+github.com/gophercloud/gophercloud v1.13.0 h1:8iY9d1DAbzMW6Vok1AxbbK5ZaUjzMp0tdyt4fX9IeJ0=
+github.com/gophercloud/gophercloud v1.13.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
+github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
+github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
+github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
 github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 h1:qnpSQwGEnkcRpTqNOIR6bJbR0gAorgP9CSALpRcKoAA=
 github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1/go.mod h1:lXGCsh6c22WGtjr+qGHj1otzZpV/1kwTMAqkwZsnWRU=
 github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk=
 github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
-github.com/hashicorp/consul/sdk v0.16.0 h1:SE9m0W6DEfgIVCJX7xU+iv/hUl4m/nxqMTnCdMxDpJ8=
-github.com/hashicorp/consul/sdk v0.16.0/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeClzVsUEy85Zr0A=
-github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
-github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0=
+github.com/hashicorp/consul/api v1.29.2 h1:aYyRn8EdE2mSfG14S1+L9Qkjtz8RzmaWh6AcNGRNwPw=
+github.com/hashicorp/consul/api v1.29.2/go.mod h1:0YObcaLNDSbtlgzIRtmRXI1ZkeuK0trCBxwZQ4MYnIk=
+github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0=
+github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
+github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
+github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
+github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
+github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
+github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
+github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/hashicorp/go-plugin v1.6.2 h1:zdGAEd0V1lCaU0u+MxWQhtSDQmahpkwOun8U8EiRVog=
 github.com/hashicorp/go-plugin v1.6.2/go.mod h1:CkgLQ5CZqNmdL9U9JzM532t8ZiYQ35+pj3b1FD37R0Q=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
+github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
+github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
+github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
+github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
+github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
+github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
+github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
+github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
+github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1avVOnu2cfms8VAiD8lUq3vWI8mTocOXN/w=
+github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
+github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
+github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
 github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE=
 github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
+github.com/hetznercloud/hcloud-go/v2 v2.10.2 h1:9gyTUPhfNbfbS40Spgij5mV5k37bOZgt8iHKCbfGs5I=
+github.com/hetznercloud/hcloud-go/v2 v2.10.2/go.mod h1:xQ+8KhIS62W0D78Dpi57jsufWh844gUw1az5OUvaeq8=
 github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI=
 github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
+github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
 github.com/invopop/jsonschema v0.12.0 h1:6ovsNSuvn9wEQVOyc72aycBMVQFKz7cPdMJn10CvzRI=
 github.com/invopop/jsonschema v0.12.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0=
+github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8=
+github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
 github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
 github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
 github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
@@ -201,15 +445,33 @@ github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx
 github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
 github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
 github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww=
+github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
 github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c=
 github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
 github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
 github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
 github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
 github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
 github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
 github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4=
 github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
@@ -217,8 +479,18 @@ github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IX
 github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
 github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
 github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs=
+github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
+github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU=
+github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU=
+github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ=
+github.com/knadh/koanf/v2 v2.1.2/go.mod h1:Gphfaen0q1Fc1HTgJgSTC4oRX9R2R5ErYMZJy8fLJBo=
+github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
+github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -227,6 +499,8 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
 github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
 github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
@@ -238,20 +512,27 @@ github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/linkedin/goavro/v2 v2.9.7/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA=
 github.com/linkedin/goavro/v2 v2.12.0 h1:rIQQSj8jdAUlKQh6DttK8wCRv4t4QO09g1C4aBWXslg=
 github.com/linkedin/goavro/v2 v2.12.0/go.mod h1:KXx+erlq+RPlGSPmLF7xGo6SAbh8sCQ53x064+ioxhk=
+github.com/linode/linodego v1.37.0 h1:B/2Spzv9jYXzKA+p+GD8fVCNJ7Wuw6P91ZDD9eCkkso=
+github.com/linode/linodego v1.37.0/go.mod h1:L7GXKFD3PoN2xSEtFc04wIXP5WK65O10jYQx0PQISWQ=
 github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
 github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/marcboeker/go-duckdb v1.8.3 h1:ZkYwiIZhbYsT6MmJsZ3UPTHrTZccDdM4ztoqSlEMXiQ=
 github.com/marcboeker/go-duckdb v1.8.3/go.mod h1:C9bYRE1dPYb1hhfu/SSomm78B0FXmNgRvv6YBW/Hooc=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
 github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
 github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
 github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
 github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
 github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
 github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
 github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
 github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
@@ -259,82 +540,167 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
 github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
 github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
-github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
-github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g=
+github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwUBUAfUNvrclaNM=
+github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
+github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs=
+github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ=
 github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs=
 github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
 github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI=
 github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
+github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
+github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
+github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.14.1
h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs= -github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwKdqp5K0= -github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= -github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= +github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mostynb/go-grpc-compression v1.2.3 h1:42/BKWMy0KEJGSdWvzqIyOZ95YcR9mLPqKctH7Uo//I= 
+github.com/mostynb/go-grpc-compression v1.2.3/go.mod h1:AghIxF3P57umzqM9yz795+y1Vjs47Km/Y2FE6ouQ7Lg= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= -github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= +github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI= +github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml/v2 v2.2.0 h1:QLgLl2yMN7N+ruc31VynXs1vhMZa7CeHHejIeBAsoHo= github.com/pelletier/go-toml/v2 v2.2.0/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common 
v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= +github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= +github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/prometheus v0.54.1 h1:vKuwQNjnYN2/mDoWfHXDhAsz/68q/dQDb+YbcEqU7MQ= +github.com/prometheus/prometheus v0.54.1/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx98BzeCR99991NY= github.com/riferrei/srclient v0.5.4 h1:dfwyR5u23QF7beuVl2WemUY2KXh5+Sc4DHKyPXBNYuc= github.com/riferrei/srclient v0.5.4/go.mod h1:vbkLmWcgYa7JgfPvuy/+K8fTS0p1bApqadxrxi/S1MI= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sanity-io/litter v1.5.5 h1:iE+sBxPBzoK6uaEP5Lt3fHNgpKcHXc/A2HGETy0uJQo= github.com/sanity-io/litter v1.5.5/go.mod h1:9gzJgR2i4ZpjZHsKvUXIRQVk7P+yM3e+jAF7bU2UI5U= github.com/santhosh-tekuri/jsonschema/v5 v5.0.0/go.mod h1:FKdcjfQW6rpZSnxxUvEA5H/cDPdvJ/SZJQLWWXWGrZ0= github.com/santhosh-tekuri/jsonschema/v5 v5.2.0 h1:WCcC4vZDS1tYNxjWlwRJZQy28r8CMoggKnxNzxsVDMQ= github.com/santhosh-tekuri/jsonschema/v5 v5.2.0/go.mod h1:FKdcjfQW6rpZSnxxUvEA5H/cDPdvJ/SZJQLWWXWGrZ0= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 h1:BkTk4gynLjguayxrYxZoMZjBnAOh7ntQvUkOFmkMqPU= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/scylladb/go-reflectx v1.0.1 h1:b917wZM7189pZdlND9PbIJ6NQxfDPfBvUaQ7cjj1iZQ= github.com/scylladb/go-reflectx v1.0.1/go.mod h1:rWnOfDIRWBGN0miMLIcoPt/Dhi2doCMZqwMCJ3KupFc= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod 
h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= +github.com/shoenig/test v1.7.1/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7 h1:12ijqMM9tvYVEm+nR826WsrNi6zCKpwBhuApq127wHs= github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7/go.mod h1:FX7/bVdoep147QQhsOPkYsPEXhGZjeYx6lBSaSXtZOA= github.com/smartcontractkit/libocr v0.0.0-20241007185508-adbe57025f12 h1:NzZGjaqez21I3DU7objl3xExTH4fxYvzTqar8DC6360= github.com/smartcontractkit/libocr v0.0.0-20241007185508-adbe57025f12/go.mod h1:fb1ZDVXACvu4frX3APHZaEBp0xi1DIm34DcA0CwTsZM= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= @@ -352,67 +718,163 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/testcontainers/testcontainers-go v0.14.0 h1:h0D5GaYG9mhOWr2qHdEKDXpkce/VlvaYOCzTRi6UBi8= github.com/testcontainers/testcontainers-go v0.14.0/go.mod h1:hSRGJ1G8Q5Bw2gXgPulJOLlEBaYJHeBSOkQM5JLG+JQ= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= +github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= 
github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= -go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= -go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/collector v0.115.0 h1:qUZ0bTeNBudMxNQ7FJKS//TxTjeJ7tfU/z22mcFavWU= +go.opentelemetry.io/collector v0.115.0/go.mod h1:66qx0xKnVvdwq60e1DEfb4e+zmM9szhPsv2hxZ/Mpj4= +go.opentelemetry.io/collector/client v1.21.0 h1:3Kes8lOFMYVxoxeAmX+DTEAkuS1iTA3NkSfqzGmygJA= +go.opentelemetry.io/collector/client v1.21.0/go.mod h1:jYJGiL0UA975OOyHmjbQSokNWt1OiviI5KjPOMUMGwc= +go.opentelemetry.io/collector/component v0.115.0 h1:iLte1oCiXzjiCnaOBKdsXacfFiECecpWxW3/LeriMoo= +go.opentelemetry.io/collector/component v0.115.0/go.mod h1:oIUFiH7w1eOimdeYhFI+gAIxYSiLDocKVJ0PTvX7d6s= +go.opentelemetry.io/collector/component/componenttest v0.115.0 h1:9URDJ9VyP6tuij+YHjp/kSSMecnZOd7oGvzu+rw9SJY= +go.opentelemetry.io/collector/component/componenttest v0.115.0/go.mod h1:PzXvNqKLCiSADZGZFKH+IOHMkaQ0GTHuzysfVbTPKYY= +go.opentelemetry.io/collector/config/configauth v0.115.0 h1:xa+ALdyPgva3rZnLBh1H2oS5MsHP6JxSqMtQmcELnys= +go.opentelemetry.io/collector/config/configauth v0.115.0/go.mod h1:C7anpb3Rf4KswMT+dgOzkW9UX0z/65PLORpUw3p0VYc= +go.opentelemetry.io/collector/config/configcompression v1.21.0 
h1:0zbPdZAgPFMAarwJEC4gaR6f/JBP686A3TYSgb3oa+E= +go.opentelemetry.io/collector/config/configcompression v1.21.0/go.mod h1:LvYG00tbPTv0NOLoZN0wXq1F5thcxvukO8INq7xyfWU= +go.opentelemetry.io/collector/config/configgrpc v0.115.0 h1:gZzXSFe6hB3RUcEeAYqk1yT+TBa+X9tp6/1x29Yg2yk= +go.opentelemetry.io/collector/config/configgrpc v0.115.0/go.mod h1:107lRZ5LdQPMdGJGd4m1GhyKxyH0az2cUOqrJgTEN8E= +go.opentelemetry.io/collector/config/confignet v1.21.0 h1:PeQ5YrMnfftysFL/WVaSrjPOWjD6DfeABY50pf9CZxU= +go.opentelemetry.io/collector/config/confignet v1.21.0/go.mod h1:ZppUH1hgUJOubawEsxsQ9MzEYFytqo2GnVSS7d4CVxc= +go.opentelemetry.io/collector/config/configopaque v1.21.0 h1:PcvRGkBk4Px8BQM7tX+kw4i3jBsfAHGoGQbtZg6Ox7U= +go.opentelemetry.io/collector/config/configopaque v1.21.0/go.mod h1:sW0t0iI/VfRL9VYX7Ik6XzVgPcR+Y5kejTLsYcMyDWs= +go.opentelemetry.io/collector/config/configretry v1.21.0 h1:ZHoOvAkEcv5BBeaJn8IQ6rQ4GMPZWW4S+W7R4QTEbZU= +go.opentelemetry.io/collector/config/configretry v1.21.0/go.mod h1:cleBc9I0DIWpTiiHfu9v83FUaCTqcPXmebpLxjEIqro= +go.opentelemetry.io/collector/config/configtelemetry v0.115.0 h1:U07FinCDop+r2RjWQ3aP9ZWONC7r7kQIp1GkXQi6nsI= +go.opentelemetry.io/collector/config/configtelemetry v0.115.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= +go.opentelemetry.io/collector/config/configtls v1.21.0 h1:ZfrlAYgBD8lzp04W0GxwiDmUbrvKsvDYJi+wkyiXlpA= +go.opentelemetry.io/collector/config/configtls v1.21.0/go.mod h1:5EsNefPfVCMOTlOrr3wyj7LrsOgY7V8iqRl8oFZEqtw= +go.opentelemetry.io/collector/config/internal v0.115.0 h1:eVk57iufZpUXyPJFKTb1Ebx5tmcCyroIlt427r5pxS8= +go.opentelemetry.io/collector/config/internal v0.115.0/go.mod h1:OVkadRWlKAoWjHslqjWtBLAne8ceQm8WYT71ZcBWLFc= +go.opentelemetry.io/collector/confmap v1.21.0 h1:1tIcx2/Suwg8VhuPmQw87ba0ludPmumpFCFRZZa6RXA= +go.opentelemetry.io/collector/confmap v1.21.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= +go.opentelemetry.io/collector/consumer v1.21.0 h1:THKZ2Vbi6GkamjTBI2hFq5Dc4kINZTWGwQNa8d/Ty9g= +go.opentelemetry.io/collector/consumer v1.21.0/go.mod h1:FQcC4ThMtRYY41dv+IPNK8POLLhAFY3r1YR5fuP7iiY= +go.opentelemetry.io/collector/consumer/consumererror v0.115.0 h1:yli//xBCQMPZKXNgNlXemo4dvqhnFrAmCZ11DvQgmcY= +go.opentelemetry.io/collector/consumer/consumererror v0.115.0/go.mod h1:LwVzAvQ6ZVNG7mbOvurbAo+W/rKws0IcjOwriuZXqPE= +go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.115.0 h1:gaIhzpaGFWauiyznrQ3f++TbcdXxA5rpsX3L9uGjMM8= +go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.115.0/go.mod h1:7oXvuGBSawS5bc413lh1KEMcXkqBcrCqZQahOdnE24U= +go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 h1:H3fDuyQW1t2HWHkz96WMBQJKUevypOCjBqnqtaAWyoA= +go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0/go.mod h1:IzEmZ91Tp7TBxVDq8Cc9xvLsmO7H08njr6Pu9P5d9ns= +go.opentelemetry.io/collector/consumer/consumertest v0.115.0 h1:hru0I2447y0TluCdwlKYFFtgcpyCnlM+LiOK1JZyA70= +go.opentelemetry.io/collector/consumer/consumertest v0.115.0/go.mod h1:ybjALRJWR6aKNOzEMy1T1ruCULVDEjj4omtOJMrH/kU= +go.opentelemetry.io/collector/exporter v0.115.0 h1:JnxfpOnsuqhTPKJXVKJLS1Cv3BiVrVLzpHOjJEQw+xw= +go.opentelemetry.io/collector/exporter v0.115.0/go.mod h1:xof3fHQK8wADhaKLIJcQ7ChZaFLNC+haRdPN0wgl6kY= +go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.115.0 h1:fetbc740pODH6JW+H49SW0hiAJwQE+/B0SbuIlaY2rg= +go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.115.0/go.mod h1:oEKZ/d5BeaCK6Made9iwaeqmlT4lRbJSlW9nhIn/TwM= 
+go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0 h1:lSQEleCn/q9eFufcuK61NdFKU70ZlgI9dBjPCO/4CrE= +go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0/go.mod h1:7l5K2AecimX2kx+nZC1gKG3QkP247CO1+SodmJ4fFkQ= +go.opentelemetry.io/collector/exporter/exportertest v0.115.0 h1:P9SMTUXQOtcaq40bGtnnAe14zRmR4/yUgj/Tb2BEf/k= +go.opentelemetry.io/collector/exporter/exportertest v0.115.0/go.mod h1:1jMZ9gFGXglb8wfNrBZIgd+RvpZhSyFwdfE+Jtf9w4U= +go.opentelemetry.io/collector/exporter/otlpexporter v0.115.0 h1:Kqr31VFrQvgEMzeg8T1JSXWacjUQoZph39efKN8jBpY= +go.opentelemetry.io/collector/exporter/otlpexporter v0.115.0/go.mod h1:5uy/gduFx2mH0GxJ84sY75NfzQJb9xYmgiL9Pf0dKF8= +go.opentelemetry.io/collector/extension v0.115.0 h1:/cBb8AUdD0KMWC6V3lvCC16eP9Fg0wd1Upcp5rgvuGI= +go.opentelemetry.io/collector/extension v0.115.0/go.mod h1:HI7Ak6loyi6ZrZPsQJW1OO1wbaAW8OqXLFNQlTZnreQ= +go.opentelemetry.io/collector/extension/auth v0.115.0 h1:TTMokbBsSHZRFH48PvGSJmgSS8F3Rkr9MWGHZn8eJDk= +go.opentelemetry.io/collector/extension/auth v0.115.0/go.mod h1:3w+2mzeb2OYNOO4Bi41TUo4jr32ap2y7AOq64IDpxQo= +go.opentelemetry.io/collector/extension/auth/authtest v0.115.0 h1:OZe7dKbZ01qodSpZU0ZYzI6zpmmzJ3UvfdBSFAbSgDw= +go.opentelemetry.io/collector/extension/auth/authtest v0.115.0/go.mod h1:fk9WCXP0x91Q64Z8HZKWTHh9PWtgoWE1KXe3n2Bff3U= +go.opentelemetry.io/collector/extension/experimental/storage v0.115.0 h1:sZXw0+77092pq24CkUoTRoHQPLQUsDq6HFRNB0g5yR4= +go.opentelemetry.io/collector/extension/experimental/storage v0.115.0/go.mod h1:qjFH7Y3QYYs88By2ZB5GMSUN5k3ul4Brrq2J6lKACA0= +go.opentelemetry.io/collector/extension/extensiontest v0.115.0 h1:GBVFxFEskR8jSdu9uaQh2qpXnN5VNXhXjpJ2UjxtE8I= +go.opentelemetry.io/collector/extension/extensiontest v0.115.0/go.mod h1:eu1ecbz5mT+cHoH2H3GmD/rOO0WsicSJD2RLrYuOmRA= +go.opentelemetry.io/collector/featuregate v1.21.0 h1:+EULHPJDLMipcwAGZVp9Nm8NriRvoBBMxp7MSiIZVMI= +go.opentelemetry.io/collector/featuregate v1.21.0/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= +go.opentelemetry.io/collector/pdata v1.21.0 h1:PG+UbiFMJ35X/WcAR7Rf/PWmWtRdW0aHlOidsR6c5MA= +go.opentelemetry.io/collector/pdata v1.21.0/go.mod h1:GKb1/zocKJMvxKbS+sl0W85lxhYBTFJ6h6I1tphVyDU= +go.opentelemetry.io/collector/pdata/pprofile v0.115.0 h1:NI89hy13vNDw7EOnQf7Jtitks4HJFO0SUWznTssmP94= +go.opentelemetry.io/collector/pdata/pprofile v0.115.0/go.mod h1:jGzdNfO0XTtfLjXCL/uCC1livg1LlfR+ix2WE/z3RpQ= +go.opentelemetry.io/collector/pdata/testdata v0.115.0 h1:Rblz+AKXdo3fG626jS+KSd0OSA4uMXcTQfpwed6P8LI= +go.opentelemetry.io/collector/pdata/testdata v0.115.0/go.mod h1:inNnRt6S2Nn260EfCBEcjesjlKOSsr0jPwkPqpBkt4s= +go.opentelemetry.io/collector/pipeline v0.115.0 h1:bmACBqb0e8U9ag+vGGHUP7kCfAO7HHROdtzIEg8ulus= +go.opentelemetry.io/collector/pipeline v0.115.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= +go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.115.0 h1:3l9ruCAOrssTUDnyChKNzHWOdTtfThnYaoPZ1/+5sD0= +go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.115.0/go.mod h1:2Myg+law/5lcezo9PhhZ0wjCaLYdGK24s1jDWbSW9VY= +go.opentelemetry.io/collector/receiver v0.115.0 h1:55Q3Jvj6zHCIA1psKqi/3kEMJO4OqUF5tNAEYNdB1U8= +go.opentelemetry.io/collector/receiver v0.115.0/go.mod h1:nBSCh2O/WUcfgpJ+Jpz+B0z0Hn5jHeRvF2WmLij5EIY= +go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0 h1:R9JLaj2Al93smIPUkbJshAkb/cY0H5JBOxIx+Zu0NG4= +go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0/go.mod h1:05E5hGujWeeXJmzKZwTdHyZ/+rRyrQlQB5p5Q2XY39M= 
+go.opentelemetry.io/collector/receiver/receivertest v0.115.0 h1:OiB684SbHQi6/Pd3ZH0cXjYvCpBS9ilQBfTQx0wVXHg= +go.opentelemetry.io/collector/receiver/receivertest v0.115.0/go.mod h1:Y8Z9U/bz9Xpyt8GI8DxZZgryw3mnnIw+AeKVLTD2cP8= +go.opentelemetry.io/collector/semconv v0.115.0 h1:SoqMvg4ZEB3mz2EdAb6XYa+TuMo5Mir5FRBr3nVFUDY= +go.opentelemetry.io/collector/semconv v0.115.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.0.0-20240823153156-2a54df7bffb9 h1:UiRNKd1OgqsLbFwE+wkAWTdiAxXtCBqKIHeBIse4FUA= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.0.0-20240823153156-2a54df7bffb9/go.mod h1:eqZlW3pJWhjyexnDPrdQxix1pn0wwhI4AO4GKpP/bMI= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0 h1:QSKmLBzbFULSyHzOdO9JsN9lpE4zkrz1byYGmJecdVE= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0/go.mod h1:sTQ/NH8Yrirf0sJ5rWqVu+oT82i4zL9FaF6rWcqnptM= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 h1:U2guen0GhqH8o/G2un8f/aG/y++OuW6MyCo6hT9prXk= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0/go.mod h1:yeGZANgEcpdx/WK0IvvRFC+2oLiMS2u4L/0Rj2M2Qr0= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0 h1:VrMAbeJz4gnVDg2zEzjHG4dEH86j4jO6VYB+NgtGD8s= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0/go.mod h1:qqN/uFdpeitTvm+JDqqnjm517pmQRYxTORbETHq5tOc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 h1:umZgi92IyxfXd/l4kaDhnKgY8rnN/cZcF1LKc6I8OQ8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0/go.mod h1:4lVs6obhSVRb1EW5FhOuBTyiQhtRtAnnva9vD3yRfq8= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.4.0 h1:0MH3f8lZrflbUWXVxyBg/zviDFdGE062uKh5+fu8Vv0= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.4.0/go.mod h1:Vh68vYiHY5mPdekTr0ox0sALsqjoVy0w3Os278yX5SQ= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.28.0 h1:BJee2iLkfRfl9lc7aFmBwkWxY/RI1RDdXepSF6y8TPE= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.28.0/go.mod h1:DIzlHs3DRscCIBU3Y9YSzPfScwnYnzfnCd4g8zA7bZc= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y= 
-go.opentelemetry.io/otel/log v0.6.0 h1:nH66tr+dmEgW5y+F9LanGJUBYPrRgP4g2EkmPE3LeK8= -go.opentelemetry.io/otel/log v0.6.0/go.mod h1:KdySypjQHhP069JX0z/t26VHwa8vSwzgaKmXtIB3fJM= -go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= -go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= -go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= -go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= -go.opentelemetry.io/otel/sdk/log v0.6.0 h1:4J8BwXY4EeDE9Mowg+CyhWVBhTSLXVXodiXxS/+PGqI= -go.opentelemetry.io/otel/sdk/log v0.6.0/go.mod h1:L1DN8RMAduKkrwRAFDEX3E3TLOq46+XMGSbUfHU/+vE= -go.opentelemetry.io/otel/sdk/metric v1.30.0 h1:QJLT8Pe11jyHBHfSAgYH7kEmT24eX792jZO1bo4BXkM= -go.opentelemetry.io/otel/sdk/metric v1.30.0/go.mod h1:waS6P3YqFNzeP01kuo/MBBYqaoBJl7efRQHOaydhy1Y= -go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= -go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0 h1:mMOmtYie9Fx6TSVzw4W+NTpvoaS1JWWga37oI1a/4qQ= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0/go.mod h1:yy7nDsMMBUkD+jeekJ36ur5f3jJIrmCwUrY67VFhNpA= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0 h1:TwmL3O3fRR80m8EshBrd8YydEZMcUCsZXzOUlnFohwM= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0/go.mod h1:tH98dDv5KPmPThswbXA0fr0Lwfs+OhK8HgaCo7PjRrk= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 h1:SZmDnHcgp3zwlPBS2JX2urGYe/jBKEIT6ZedHRUyCz8= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0/go.mod h1:fdWW0HtZJ7+jNpTKUR0GpMEDP69nR8YBJQxNiVCE3jk= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 h1:UGZ1QwZWY67Z6BmckTU+9Rxn04m2bD3gD6Mk0OIOCPk= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0/go.mod h1:fcwWuDuaObkkChiDlhEpSq9+X1C0omv+s5mBtToAQ64= +go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= +go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric 
v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/sdk/log v0.7.0 h1:dXkeI2S0MLc5g0/AwxTZv6EUEjctiH8aG14Am56NTmQ= +go.opentelemetry.io/otel/sdk/log v0.7.0/go.mod h1:oIRXpW+WD6M8BuGj5rtS0aRu/86cbDV/dAfNaZBIjYM= +go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= +go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= @@ -426,103 +888,275 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto 
v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod 
h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210331212208-0fccb6fa2b5c/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= +golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= 
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -535,20 +1169,77 @@ golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhS golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.15.1 h1:FNy7N6OUZVUaWG9pTiD+jlhdQ3lMP+/LcTpJ6+a8sQ0= gonum.org/v1/gonum v0.15.1/go.mod h1:eZTZuRFrzu5pcyjN5wJhcIhnUdNijYxX1T2IcrOGY0o= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= 
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.188.0 h1:51y8fJ/b1AaaBRJr4yWm96fPcuxSo0JcegXE3DaHQHw= +google.golang.org/api v0.188.0/go.mod h1:VR0d+2SIiWOYG3r/jdm7adPW9hI2aRv9ETOSCQ9Beag= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210401141331-865547bb08e2/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= 
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= @@ -560,24 +1251,63 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= 
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= +k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= +k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= +k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= +k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= +k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/pkg/promotel/Makefile b/pkg/promotel/Makefile new file mode 100644 index 0000000000..bfc6519fe4 --- /dev/null +++ b/pkg/promotel/Makefile @@ -0,0 +1,27 @@ +help: ## Print this help text + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-40s\033[0m %s\n", $$1, $$2}' + +.PHONY: \ + tidy \ + fmt \ + lint \ + test \ + run-example + +tidy: fmt ## run go mod tidy + go mod tidy + +fmt: ## run go fmt + go fmt ./... + +lint: ## run golangci-lint + golangci-lint run ./... + +test: ## run unit tests + go test -v ./... 
+
+build: tidy ## build the demo
+	go build ./cmd/example.go
+
+run-example: ## run the example
+	go run ./cmd/example.go
diff --git a/pkg/promotel/README.md b/pkg/promotel/README.md
new file mode 100644
index 0000000000..e5ef4b7f6a
--- /dev/null
+++ b/pkg/promotel/README.md
@@ -0,0 +1,157 @@
+# Package Overview
+The package provides components for converting Prometheus metrics to the OTel format.
+
+Main components: MetricsReceiver, MetricsExporter
+
+## Receiver
+- Wraps [prometheusreceiver](github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver)
+- Fetches Prometheus metrics data via `prometheus.Gatherer` (same process memory, no HTTP calls)
+- Uses a custom implementation of `prometheus.scraper` (from https://github.com/pkcll/prometheus/pull/1) to bypass HTTP requests and fetch data directly from `prometheus.Gatherer`
+- Converts Prometheus metrics into OTel format using [prometheusreceiver](github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver)
+- Passes OTel metrics data to the downstream OTel [otlpexporter](go.opentelemetry.io/collector/exporter/otlpexporter)
+
+## Exporter
+- Wraps [otlpexporter](go.opentelemetry.io/collector/exporter/otlpexporter)
+- Receives metric data from the receiver
+- Exports OTel metrics data to an OTel Collector endpoint via [otlpexporter](go.opentelemetry.io/collector/exporter/otlpexporter)
+
+## OTel collector prometheusreceiver
+
+[prometheusreceiver](github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver) is a component of the OTel Collector that collects metrics from Prometheus endpoints. It scrapes the metrics at regular intervals and converts them into a format that can be processed by the rest of the collector pipeline.
+
+`promotel` is a wrapper around `prometheusreceiver` that provides a simple API to start and stop the receiver and to process the metrics data.
+
+`promotel` uses the `prometheusreceiver` factory to create an instance of the receiver via `factory.CreateMetrics` with the provided configuration. It also accepts a callback function that is called every time new metrics data is received. The metrics data is a `pmetric.Metrics` object containing the metrics received from the Prometheus endpoint; a sketch of such a callback follows below.
+
+`promotel/internal` contains implementations of `consumer.Metrics`, `component.Host`, `receiver.Settings`, and `component.TelemetrySettings`, which are the dependencies required for `factory.CreateMetrics`.
+
+`metrics.Consumer` is the interface used to process the metrics data. The `prometheusreceiver` calls the `Consumer.ConsumeMetrics` function every time new metrics data is received.
+
+`prometheusreceiver` exposes `Start` and `Shutdown` methods.
+
+The `github.com/pkcll/prometheus v0.54.1-promotel` fork overrides the `prometheus` package to provide a way to scrape metrics directly from `prometheus.DefaultGatherer` without making HTTP requests to the Prometheus endpoint. This is useful when the Prometheus endpoint is not accessible from the collector.
+
+Example configuration:
+
+```yaml
+receivers:
+  prometheus:
+    config:
+      scrape_configs:
+        - job_name: 'example'
+          static_configs:
+            - targets: ['localhost:9090']
+```
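+
+As a minimal sketch of the callback shape (assuming only the collector's `consumer` and `pdata/pmetric` packages; `countingConsumer` is a hypothetical helper, not part of this package), a callback is a `consumer.ConsumeMetricsFunc` and can inspect each converted batch before handing it off:
+
+```go
+package example
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/pdata/pmetric"
+)
+
+// countingConsumer (hypothetical) wraps another callback and logs how many
+// metrics and data points each converted batch carries before forwarding it.
+func countingConsumer(next consumer.ConsumeMetricsFunc) consumer.ConsumeMetricsFunc {
+	return func(ctx context.Context, md pmetric.Metrics) error {
+		fmt.Printf("batch: %d metrics, %d data points\n", md.MetricCount(), md.DataPointCount())
+		return next(ctx, md)
+	}
+}
+```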
+
+## OTel collector otlpexporter
+
+[otlpexporter](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/otlpexporter) is a component of the OpenTelemetry Collector that exports telemetry data (metrics, logs, and traces) using the OpenTelemetry Protocol (OTLP). It supports both gRPC and HTTP transport protocols.
+
+Example configuration:
+
+```yaml
+exporters:
+  otlp:
+    endpoint: "localhost:4317"
+    tls:
+      insecure: true
+    retry_on_failure:
+      enabled: true
+      initial_interval: 5s
+      max_interval: 30s
+      max_elapsed_time: 300s
+    sending_queue:
+      enabled: true
+      queue_size: 5000
+```
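+
+The same settings can be supplied to `promotel` programmatically as a raw map. A minimal sketch (the `endpoint` and `tls` keys mirror the defaults used by this package; treat the `retry_on_failure` keys as illustrative otlpexporter options):
+
+```go
+exporterConfig, err := promotel.NewExporterConfig(map[string]any{
+	"endpoint": "localhost:4317",
+	"tls": map[string]any{
+		"insecure": true,
+	},
+	// Illustrative: retry options following the otlpexporter config schema
+	"retry_on_failure": map[string]any{
+		"enabled":          true,
+		"initial_interval": "5s",
+		"max_interval":     "30s",
+	},
+})
+if err != nil {
+	panic(err) // the raw map failed to unmarshal into the otlpexporter config
+}
+```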
+
+### `promotel` usage example:
+
+```go
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"go.uber.org/zap"
+
+	"github.com/smartcontractkit/chainlink-common/pkg/promotel"
+)
+
+func main() {
+	logger, _ := zap.NewDevelopment()
+	exporterConfig, _ := promotel.NewDefaultExporterConfig()
+	exporter, _ := promotel.NewMetricExporter(exporterConfig, logger)
+	receiverConfig, _ := promotel.NewDefaultReceiverConfig()
+	// Fetches metrics data directly from DefaultGatherer without making HTTP requests to 127.0.0.1:8888
+	receiver, _ := promotel.NewMetricReceiver(receiverConfig, prometheus.DefaultGatherer, exporter.Consumer().ConsumeMetrics, logger)
+	fmt.Println("Starting promotel pipeline")
+	exporter.Start(context.Background())
+	receiver.Start(context.Background())
+	defer receiver.Close()
+	defer exporter.Close()
+	time.Sleep(1 * time.Minute)
+}
+```
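+
+The default receiver configuration can be replaced with a custom one. A minimal sketch (the map follows the same shape `NewDefaultReceiverConfig` uses; the job name and interval here are illustrative):
+
+```go
+receiverConfig, err := promotel.NewReceiverConfig(map[string]any{
+	"config": map[string]any{
+		"scrape_configs": []map[string]any{{
+			"job_name":        "my-app",
+			"scrape_interval": "5s",
+			"static_configs":  []map[string]any{{"targets": []string{"127.0.0.1:8888"}}},
+		}},
+	},
+})
+if err != nil {
+	panic(err) // the raw map failed validation against the prometheusreceiver config
+}
+```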
+
+### Debug Metric Receiver
+
+`DebugMetricReceiver` is an implementation of `metrics.Consumer` that prints formatted metrics data to stdout. It is useful for testing purposes.
+
+### `Debug Metric Receiver` usage example:
+
+```go
+...
+	// Debug metric receiver prints fetched metrics to stdout
+	receiver, err := promotel.NewDebugMetricReceiver(config, prometheus.DefaultGatherer, logger)
+	// Start metric receiver
+	receiver.Start(context.Background())
+...
+```
+
+Output example:
+
+```
+NumberDataPoints #0
+StartTimestamp: 1970-01-01 00:00:00 +0000 UTC
+Timestamp: 2025-01-02 18:38:28.905 +0000 UTC
+Value: 44.000000
+Metric #18
+Descriptor:
+     -> Name: otelcol_exporter_sent_metric_points
+     -> Description: Number of metric points successfully sent to destination.
+     -> Unit:
+     -> DataType: Sum
+     -> IsMonotonic: true
+     -> AggregationTemporality: Cumulative
+NumberDataPoints #0
+Data point attributes:
+     -> exporter: Str(debug)
+     -> service_version: Str(0.108.1)
+StartTimestamp: 2025-01-02 18:38:05.905 +0000 UTC
+Timestamp: 2025-01-02 18:38:28.905 +0000 UTC
+Value: 137.000000
+NumberDataPoints #1
+Data point attributes:
+     -> exporter: Str(otlphttp)
+     -> service_version: Str(0.108.1)
+StartTimestamp: 2025-01-02 18:38:05.905 +0000 UTC
+Timestamp: 2025-01-02 18:38:28.905 +0000 UTC
+Value: 137.000000
+Metric #19
+Descriptor:
+     -> Name: otelcol_process_cpu_seconds
+     -> Description: Total CPU user and system time in seconds
+     -> Unit:
+     -> DataType: Sum
+     -> IsMonotonic: true
+     -> AggregationTemporality: Cumulative
+NumberDataPoints #0
+Data point attributes:
+     -> service_version: Str(0.108.1)
+StartTimestamp: 2025-01-02 18:38:05.905 +0000 UTC
+Timestamp: 2025-01-02 18:38:28.905 +0000 UTC
+Value: 0.930000
+```
\ No newline at end of file
diff --git a/pkg/promotel/cmd/example.go b/pkg/promotel/cmd/example.go
new file mode 100644
index 0000000000..672d398e60
--- /dev/null
+++ b/pkg/promotel/cmd/example.go
@@ -0,0 +1,148 @@
+package main
+
+import (
+	"context"
+	"os"
+	"os/signal"
+	"syscall"
+	"time"
+
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/pdata/pmetric"
+	"go.uber.org/zap"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+	dto "github.com/prometheus/client_model/go"
+
+	"github.com/smartcontractkit/chainlink-common/pkg/promotel"
+)
+
+const testCounterMetricName = "test_counter_metric"
+
+func reportMetrics(reg prometheus.Registerer, logger *zap.Logger) {
+	testCounter := promauto.With(reg).NewCounter(prometheus.CounterOpts{
+		Name: testCounterMetricName,
+		ConstLabels: prometheus.Labels{
+			"app": "promotel-demo",
+		},
+	})
+	for {
+		testCounter.Inc()
+		m := &dto.Metric{}
+		_ = testCounter.Write(m)
+		logger.Info("Reported Prometheus metric ", zap.Any("name", testCounterMetricName), zap.Any("value", m.GetCounter().GetValue()))
+		time.Sleep(1 * time.Second)
+	}
+}
+
+func gatherMetricsDirectly(reg prometheus.Gatherer, logger *zap.Logger) {
+	for {
+		mf, err := reg.Gather()
+		if err != nil {
+			logger.Error("Error gathering metrics", zap.Error(err))
+		}
+		for _, metricFamily := range mf {
+			if *metricFamily.Name == testCounterMetricName {
+				for _, metric := range metricFamily.Metric {
+					logger.Info("Received Prometheus metric ", zap.Any("name", testCounterMetricName), zap.Any("value", metric.Counter.GetValue()))
+				}
+			}
+		}
+		time.Sleep(1 * time.Second)
+	}
+}
+
+func startExporter(ctx context.Context, logger *zap.Logger) promotel.MetricExporter {
+	expConfig, err := promotel.NewExporterConfig(map[string]any{
+		"endpoint": "localhost:4317",
+		"tls": map[string]any{
+			"insecure": true,
+		},
+	})
+	if err != nil {
+		logger.Fatal("Failed to create exporter config", zap.Error(err))
+	}
+	// Sends metrics data in OTLP format to otel-collector endpoint
+	exporter, err := promotel.NewMetricExporter(expConfig, logger)
+	if err != nil {
+		logger.Fatal("Failed to create metric exporter", zap.Error(err))
+	}
+	err = exporter.Start(ctx)
+	if err != nil {
+		logger.Fatal("Failed to start exporter", zap.Error(err))
+	}
+	return exporter
+}
+
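+// startMetricReceiver wires the in-process prometheus.Gatherer into a promotel
+// MetricReceiver and forwards each converted pmetric.Metrics batch to next.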
+func startMetricReceiver(reg prometheus.Gatherer, logger *zap.Logger, next consumer.ConsumeMetricsFunc) promotel.Runnable {
+	logger.Info("Starting promotel metric receiver")
+	config, err := promotel.NewDefaultReceiverConfig()
+	if err != nil {
+		logger.Fatal("Failed to create config", zap.Error(err))
+	}
+
+	// Gather metrics via promotel
+	// MetricReceiver fetches metrics from prometheus.Gatherer, converts them to OTel format, and passes them to the next consumer
+	receiver, err := promotel.NewMetricReceiver(config, reg, next, logger)
+	if err != nil {
+		logger.Fatal("Failed to create metric receiver", zap.Error(err))
+	}
+	// Starts the receiver
+	if err := receiver.Start(context.Background()); err != nil {
+		logger.Fatal("Failed to start metric receiver", zap.Error(err))
+	}
+	return receiver
+}
+
+func main() {
+	logger, _ := zap.NewDevelopment()
+
+	go reportMetrics(prometheus.DefaultRegisterer, logger)
+	// Gather metrics directly from DefaultGatherer to verify that the metrics are being reported
+	go gatherMetricsDirectly(prometheus.DefaultGatherer, logger)
+
+	exporter := startExporter(context.Background(), logger)
+	// Fetches metrics from the in-memory prometheus.Gatherer and converts to OTel format
+	receiver := startMetricReceiver(prometheus.DefaultGatherer, logger, func(ctx context.Context, md pmetric.Metrics) error {
+		// Logs the converted OTel metric
+		logOtelMetric(md, testCounterMetricName, logger)
+		// Exports the converted OTel metric
+		return exporter.Consumer().ConsumeMetrics(ctx, md)
+	})
+
+	// Wait for a signal to exit
+	signalChan := make(chan os.Signal, 1)
+	signal.Notify(signalChan, os.Interrupt, syscall.SIGTERM)
+
+	// Block until a signal is received
+	<-signalChan
+	logger.Info("Exiting promotel")
+	// Gracefully shuts down promotel
+	if err := receiver.Close(); err != nil {
+		logger.Fatal("Failed to close receiver", zap.Error(err))
+	}
+	if err := exporter.Close(); err != nil {
+		logger.Fatal("Failed to close exporter", zap.Error(err))
+	}
+}
+
+func logOtelMetric(md pmetric.Metrics, name string, logger *zap.Logger) {
+	rms := md.ResourceMetrics()
+	for i := 0; i < rms.Len(); i++ {
+		rm := rms.At(i)
+		ilms := rm.ScopeMetrics()
+		for j := 0; j < ilms.Len(); j++ {
+			ilm := ilms.At(j)
+			metrics := ilm.Metrics()
+			for k := 0; k < metrics.Len(); k++ {
+				metric := metrics.At(k)
+				if metric.Name() == name {
+					logger.Info("Exporting OTel metric ", zap.Any("name", metric.Name()), zap.Any("value", metric.Sum().DataPoints().At(0).DoubleValue()))
+				}
+			}
+		}
+	}
+}
diff --git a/pkg/promotel/cmd/example_test.go b/pkg/promotel/cmd/example_test.go
new file mode 100644
index 0000000000..12ed432233
--- /dev/null
+++ b/pkg/promotel/cmd/example_test.go
@@ -0,0 +1,60 @@
+package main
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"go.opentelemetry.io/collector/pdata/pmetric"
+	"go.uber.org/zap"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+func TestExample(t *testing.T) {
+	logger, _ := zap.NewDevelopment()
+
+	go reportMetrics(prometheus.DefaultRegisterer, logger)
+
+	// Fetches metrics from the in-memory prometheus.Gatherer and converts to OTel format
+	foundCh := make(chan struct{})
+	receiver := startMetricReceiver(prometheus.DefaultGatherer, logger, func(ctx context.Context, md pmetric.Metrics) error {
+		// Logs the converted OTel metric
+		rms := md.ResourceMetrics()
+		for i := 0; i < rms.Len(); i++ {
+			rm := rms.At(i)
+			ilms := rm.ScopeMetrics()
+			for j := 0; j < ilms.Len(); j++ {
+				ilm := ilms.At(j)
+				metrics := ilm.Metrics()
+				for k := 0; k < metrics.Len(); k++ {
+					metric := metrics.At(k)
+					if metric.Name() == testCounterMetricName {
+						v := metric.Sum().DataPoints().At(0).DoubleValue()
+						logger.Info("Exporting OTel metric ", zap.Any("name", metric.Name()), zap.Any("value", v))
+						if v > 0 {
+							foundCh <- struct{}{}
+							return nil
+						}
+					}
+				}
+			}
+		}
+		return nil
+	})
+	defer receiver.Close()
+
+	timeout := 10 * time.Second
+	if deadline, ok := t.Deadline(); ok {
+		timeout = time.Until(deadline)
+	}
+	timer := time.NewTimer(timeout)
+	defer timer.Stop()
+
+	select {
+	case <-timer.C:
+		t.Fatal("Timed out waiting for metric")
+	case <-foundCh:
+		t.Log("Found metric")
+	}
+}
diff --git a/pkg/promotel/config.go b/pkg/promotel/config.go
new file mode 100644
index 0000000000..3d257ffbea
--- /dev/null
+++ b/pkg/promotel/config.go
@@ -0,0 +1,149 @@
+package promotel
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/prometheus/prometheus/discovery"
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/confmap"
+	"go.opentelemetry.io/collector/exporter/otlpexporter"
+	"gopkg.in/yaml.v3"
+
+	"github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheusreceiver"
+)
+
+type ReceiverConfig = component.Config
+type ExporterConfig = component.Config
+
+func NewReceiverConfig(rawConf map[string]any) (ReceiverConfig, error) {
+	factory := prometheusreceiver.NewFactory()
+
+	cfg := confmap.NewFromStringMap(rawConf)
+	// Creates a default configuration for the receiver
+	config := factory.CreateDefaultConfig()
+	// Merges the configuration into the default config
+	if err := cfg.Unmarshal(config); err != nil {
+		return nil, err
+	}
+	if err := validateConfig(config); err != nil {
+		return nil, err
+	}
+	return config, nil
+}
+
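+// NewDefaultReceiverConfig returns a receiver configuration that scrapes the
+// in-process gatherer every second; the 127.0.0.1:8888 target is a placeholder,
+// since promotel short-circuits the HTTP scrape and reads from prometheus.Gatherer.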
+func NewDefaultReceiverConfig() (ReceiverConfig, error) {
+	return NewReceiverConfig(map[string]any{
+		"config": map[string]any{
+			"scrape_configs": []map[string]any{{
+				"job_name":        "promotel",
+				"scrape_interval": "1s",
+				"static_configs":  []map[string]any{{"targets": []string{"127.0.0.1:8888"}}},
+				"metric_relabel_configs": []map[string]any{{
+					"action": "labeldrop",
+					"regex":  "service_instance_id|service_name",
+				}},
+			}},
+		},
+	})
+}
+
+func NewExporterConfig(rawConf map[string]any) (ExporterConfig, error) {
+	factory := otlpexporter.NewFactory()
+
+	cfg := confmap.NewFromStringMap(rawConf)
+	// Creates a default configuration for the exporter
+	config := factory.CreateDefaultConfig()
+	// Merges the configuration into the default config
+	if err := cfg.Unmarshal(config); err != nil {
+		return nil, err
+	}
+	if err := component.ValidateConfig(config); err != nil {
+		return nil, err
+	}
+	return config, nil
+}
+
+func NewDefaultExporterConfig() (ExporterConfig, error) {
+	return NewExporterConfig(map[string]any{
+		"endpoint": "localhost:4317",
+		"tls": map[string]any{
+			"insecure": true,
+		},
+	})
+}
+
+// Used for tests
+func LoadTestConfig(fileName string, configName string) (ReceiverConfig, error) {
+	content, err := os.ReadFile(filepath.Clean(fileName))
+	if err != nil {
+		return nil, fmt.Errorf("unable to read the file %v: %w", fileName, err)
+	}
+	var rawConf map[string]any
+	if err = yaml.Unmarshal(content, &rawConf); err != nil {
+		return nil, err
+	}
+	cm := confmap.NewFromStringMap(rawConf)
+	componentType := component.MustNewType("prometheus")
+	sub, err := cm.Sub(component.NewIDWithName(componentType, configName).String())
+	if err != nil {
+		return nil, err
+	}
+	return NewReceiverConfig(sub.ToStringMap())
+}
+
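+// validateConfig runs the collector's own validation and then checks that the
+// unmarshalled prometheusreceiver config has a non-empty job name, scrape
+// interval, metrics path, static targets, and relabel rules.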
+func validateConfig(config component.Config) error {
+	if err := component.ValidateConfig(config); err != nil {
+		return err
+	}
+	cfg, ok := config.(*prometheusreceiver.Config)
+	if !ok {
+		return fmt.Errorf("expected config to be of type *prometheusreceiver.Config, got %T", config)
+	}
+	if cfg.PrometheusConfig == nil {
+		return errors.New("PrometheusConfig is nil")
+	}
+	for _, scrapeConfig := range cfg.PrometheusConfig.ScrapeConfigs {
+		if scrapeConfig.JobName == "" {
+			return fmt.Errorf("unexpected job_name: %s", scrapeConfig.JobName)
+		}
+		if scrapeConfig.ScrapeInterval == 0 {
+			return fmt.Errorf("unexpected scrape_interval: %s", scrapeConfig.ScrapeInterval)
+		}
+		if scrapeConfig.MetricsPath == "" {
+			return errors.New("metrics_path is empty")
+		}
+		for _, cfg := range scrapeConfig.ServiceDiscoveryConfigs {
+			staticConfig, ok := cfg.(discovery.StaticConfig)
+			if !ok {
+				return fmt.Errorf("expected static config, got %T", cfg)
+			}
+			for _, c := range staticConfig {
+				if c.Targets == nil {
+					return errors.New("targets is nil")
+				}
+				if len(c.Targets) == 0 {
+					return errors.New("targets is empty")
+				}
+			}
+			if len(staticConfig) == 0 {
+				return errors.New("static config is empty")
+			}
+			if len(staticConfig[0].Targets) == 0 || staticConfig[0].Targets[0].String() == "" {
+				return fmt.Errorf("unexpected targets: %v", staticConfig[0].Targets)
+			}
+		}
+		for _, relabelConfig := range scrapeConfig.MetricRelabelConfigs {
+			if relabelConfig.Action == "" {
+				return fmt.Errorf("unexpected action: %s", relabelConfig.Action)
+			}
+			if relabelConfig.Regex.String() == "" {
+				return fmt.Errorf("unexpected regex: %s", relabelConfig.Regex.String())
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/pkg/promotel/config_test.go b/pkg/promotel/config_test.go
new file mode 100644
index 0000000000..0277e93c56
--- /dev/null
+++ b/pkg/promotel/config_test.go
@@ -0,0 +1,124 @@
+package promotel_test
+
+import (
+	"path/filepath"
+	"testing"
+	"time"
+
+	promModel "github.com/prometheus/common/model"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configauth"
+	"go.opentelemetry.io/collector/config/configgrpc"
+	"go.opentelemetry.io/collector/config/configopaque"
+	"go.opentelemetry.io/collector/config/configretry"
+	"go.opentelemetry.io/collector/config/configtls"
+	"go.opentelemetry.io/collector/confmap"
+	"go.opentelemetry.io/collector/confmap/confmaptest"
+	"go.opentelemetry.io/collector/exporter/exporterbatcher"
+	"go.opentelemetry.io/collector/exporter/exporterhelper"
+	"go.opentelemetry.io/collector/exporter/otlpexporter"
+
+	"github.com/smartcontractkit/chainlink-common/pkg/promotel"
+	"github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheusreceiver"
+)
+
+func TestConfig(t *testing.T) {
+	configFileName := filepath.Join("testdata", "promconfig.yaml")
+	cfg, err := promotel.LoadTestConfig(configFileName, "")
+	require.NoError(t, err)
+
+	c0 := cfg.(*prometheusreceiver.Config)
+	assert.NotNil(t, c0.PrometheusConfig)
+
+	cfg, err = promotel.LoadTestConfig(configFileName, "withScrape")
+	require.NoError(t, err)
+
+	c1 := cfg.(*prometheusreceiver.Config)
+	assert.NotNil(t, c1.PrometheusConfig)
+
+	assert.Len(t, c1.PrometheusConfig.ScrapeConfigs, 1)
+	assert.Equal(t, "demo", c1.PrometheusConfig.ScrapeConfigs[0].JobName)
+	assert.Equal(t, promModel.Duration(5*time.Second), c1.PrometheusConfig.ScrapeConfigs[0].ScrapeInterval)
+
+	cfg, err = promotel.LoadTestConfig(configFileName, "withOnlyScrape")
+	require.NoError(t, err)
+
+	c2 := cfg.(*prometheusreceiver.Config)
+	assert.Len(t, c2.PrometheusConfig.ScrapeConfigs, 1)
+	assert.Equal(t, "demo", c2.PrometheusConfig.ScrapeConfigs[0].JobName)
+	assert.Equal(t, promModel.Duration(5*time.Second), c2.PrometheusConfig.ScrapeConfigs[0].ScrapeInterval)
+}
+
+func TestUnmarshalDefaultConfig(t *testing.T) {
+	factory := otlpexporter.NewFactory()
+	cfg := factory.CreateDefaultConfig()
+	require.NoError(t, confmap.New().Unmarshal(&cfg))
+	assert.Equal(t, factory.CreateDefaultConfig(), cfg)
+
+	cfg, err := promotel.NewDefaultExporterConfig()
+	require.NoError(t, err)
+	assert.Equal(t, "localhost:4317", cfg.(*otlpexporter.Config).ClientConfig.Endpoint)
+	assert.True(t, cfg.(*otlpexporter.Config).ClientConfig.TLSSetting.Insecure)
+}
+
+func TestUnmarshalConfig(t *testing.T) {
+	cm, err := confmaptest.LoadConf(filepath.Join("testdata", "exporter-config.yaml"))
+	require.NoError(t, err)
+	cfg, err := promotel.NewExporterConfig(cm.ToStringMap())
+	require.NoError(t, err)
+	assert.Equal(t,
+		&otlpexporter.Config{
+			TimeoutConfig: exporterhelper.TimeoutConfig{
+				Timeout: 10 * time.Second,
+			},
+			RetryConfig: configretry.BackOffConfig{
+				Enabled:             true,
+				InitialInterval:     10 * time.Second,
+				RandomizationFactor: 0.7,
+				Multiplier:          1.3,
+				MaxInterval:         1 * time.Minute,
+				MaxElapsedTime:      10 * time.Minute,
+			},
+			QueueConfig: exporterhelper.QueueConfig{
+				Enabled:      true,
+				NumConsumers: 2,
+				QueueSize:    10,
+			},
+			BatcherConfig: exporterbatcher.Config{
+				Enabled:      true,
+				FlushTimeout: 200 * time.Millisecond,
+				MinSizeConfig: exporterbatcher.MinSizeConfig{
+					MinSizeItems: 1000,
+				},
+				MaxSizeConfig: exporterbatcher.MaxSizeConfig{
+					MaxSizeItems: 10000,
+				},
+			},
+			ClientConfig: configgrpc.ClientConfig{
+				Headers: map[string]configopaque.String{
+					"can you have a . here?": "F0000000-0000-0000-0000-000000000000",
+					"header1":                "234",
+					"another":                "somevalue",
+				},
+				Endpoint:    "1.2.3.4:1234",
+				Compression: "gzip",
+				TLSSetting: configtls.ClientConfig{
+					Config: configtls.Config{
+						CAFile: "/var/lib/mycert.pem",
+					},
+					Insecure: false,
+				},
+				Keepalive: &configgrpc.KeepaliveClientConfig{
+					Time:                20 * time.Second,
+					PermitWithoutStream: true,
+					Timeout:             30 * time.Second,
+				},
+				WriteBufferSize: 512 * 1024,
+				BalancerName:    "round_robin",
+				Auth:            &configauth.Authentication{AuthenticatorID: component.MustNewID("nop")},
+			},
+		}, cfg)
+}
diff --git a/pkg/promotel/exporter.go b/pkg/promotel/exporter.go
new file mode 100644
index 0000000000..854d60760a
--- /dev/null
+++ b/pkg/promotel/exporter.go
@@ -0,0 +1,52 @@
+package promotel
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/exporter"
+	"go.opentelemetry.io/collector/exporter/otlpexporter"
+	"go.uber.org/zap"
+
+	"github.com/smartcontractkit/chainlink-common/pkg/promotel/internal"
+)
+
+type MetricExporter interface {
+	Runnable
+	Consumer() consumer.Metrics
+}
+
+type metricExporter struct {
+	factory  exporter.Factory
+	host     component.Host
+	exporter exporter.Metrics
+}
+
+func (me *metricExporter) Start(ctx context.Context) error {
+	return me.exporter.Start(ctx, me.host)
+}
+
+func (me *metricExporter) Close() error {
+	return me.exporter.Shutdown(context.Background())
+}
+
+func (me *metricExporter) Consumer() consumer.Metrics {
+	// The underlying OTLP exporter implements consumer.Metrics
+	return me.exporter
+}
+
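+// NewMetricExporter builds an OTLP metrics exporter from the collector's
+// otlpexporter factory and pairs it with a no-op host so it can be started
+// outside a full collector pipeline.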
+func NewMetricExporter(config ExporterConfig, logger *zap.Logger) (MetricExporter, error) {
+	factory := otlpexporter.NewFactory()
+	// Creates a metrics exporter with the context, settings, and config
+	exporter, err := factory.CreateMetrics(
+		context.Background(),
+		internal.NewExporterSettings(logger),
+		config)
+	if err != nil {
+		return nil, err
+	}
+	// Creates a no-operation host for the exporter
+	host := internal.NewNopHost()
+	return &metricExporter{factory, host, exporter}, nil
+}
diff --git a/pkg/promotel/exporter_test.go b/pkg/promotel/exporter_test.go
new file mode 100644
index 0000000000..80a25b54d5
--- /dev/null
+++ b/pkg/promotel/exporter_test.go
@@ -0,0 +1,54 @@
+package promotel
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"go.opentelemetry.io/collector/component/componenttest"
+	"go.opentelemetry.io/collector/config/configcompression"
+	"go.opentelemetry.io/collector/config/configretry"
+	"go.opentelemetry.io/collector/exporter/exporterhelper"
+	"go.opentelemetry.io/collector/exporter/exportertest"
+	"go.opentelemetry.io/collector/exporter/otlpexporter"
+)
+
+func TestCreateDefaultConfig(t *testing.T) {
+	factory := otlpexporter.NewFactory()
+	cfg := factory.CreateDefaultConfig()
+	assert.NotNil(t, cfg, "failed to create default config")
+	require.NoError(t, componenttest.CheckConfigStruct(cfg))
+	ocfg, ok := factory.CreateDefaultConfig().(*otlpexporter.Config)
+	assert.True(t, ok)
+	assert.Equal(t, configretry.NewDefaultBackOffConfig(), ocfg.RetryConfig)
+	assert.Equal(t, exporterhelper.NewDefaultQueueConfig(), ocfg.QueueConfig)
+	assert.Equal(t, exporterhelper.NewDefaultTimeoutConfig(), ocfg.TimeoutConfig)
+	assert.Equal(t, configcompression.TypeGzip, ocfg.Compression)
+}
+
+func TestCreateMetrics(t *testing.T) {
+	factory := otlpexporter.NewFactory()
+	cfg := factory.CreateDefaultConfig().(*otlpexporter.Config)
+	cfg.ClientConfig.Endpoint = "localhost:4317"
+
+	set := exportertest.NewNopSettings()
+	oexp, err := factory.CreateMetrics(context.Background(), set, cfg)
+	require.NoError(t, err)
+	require.NotNil(t, oexp)
+}
+
+func TestMetricExporter(t *testing.T) {
+	factory := otlpexporter.NewFactory()
+	cfg := factory.CreateDefaultConfig().(*otlpexporter.Config)
+	cfg.ClientConfig.Endpoint = "localhost:4317"
+
+	exporter, err := NewMetricExporter(cfg, nil)
+	require.NoError(t, err)
+	require.NotNil(t, exporter)
+
+	require.NoError(t, exporter.Start(context.Background()))
+	require.NoError(t, exporter.Close())
+	require.NotNil(t, exporter.Consumer())
+}
diff --git a/pkg/promotel/internal/consumer.go b/pkg/promotel/internal/consumer.go
new file mode 100644
index 0000000000..1f196f4186
--- /dev/null
+++ b/pkg/promotel/internal/consumer.go
@@ -0,0 +1,33 @@
+package internal
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/pdata/pmetric"
+)
+
+type nonMutatingConsumer struct{}
+
+// Capabilities returns the base consumer capabilities.
+func (bc nonMutatingConsumer) Capabilities() consumer.Capabilities {
+	return consumer.Capabilities{MutatesData: false}
+}
+
+type baseConsumer struct {
+	nonMutatingConsumer
+	consumer.ConsumeMetricsFunc
+}
+
+// NewNopConsumer returns a consumer.Metrics that drops all received data and returns no error.
+func NewNopConsumer() consumer.Metrics { + return &baseConsumer{ + ConsumeMetricsFunc: func(context.Context, pmetric.Metrics) error { return nil }, + } +} + +func NewConsumer(consumeFunc consumer.ConsumeMetricsFunc) consumer.Metrics { + return &baseConsumer{ + ConsumeMetricsFunc: consumeFunc, + } +} diff --git a/pkg/promotel/internal/debug.go b/pkg/promotel/internal/debug.go new file mode 100644 index 0000000000..057927a564 --- /dev/null +++ b/pkg/promotel/internal/debug.go @@ -0,0 +1,314 @@ +package internal + +import ( + "bytes" + "fmt" + "math" + "strings" + + "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/zap" +) + +type Exporter interface { + Export(md pmetric.Metrics) error +} + +type DebugExporter struct { + logger *zap.Logger + metricsMarshaler pmetric.Marshaler + verbosity configtelemetry.Level +} + +func NewDebugExporter(logger *zap.Logger) Exporter { + return &DebugExporter{ + logger: logger, + metricsMarshaler: &pmetric.JSONMarshaler{}, + verbosity: configtelemetry.LevelDetailed, + } +} + +func (s *DebugExporter) Export(md pmetric.Metrics) error { + s.logger.Info("Metrics", + zap.Int("resource metrics", md.ResourceMetrics().Len()), + zap.Int("metrics", md.MetricCount()), + zap.Int("data points", md.DataPointCount()), + ) + + if s.verbosity == configtelemetry.LevelBasic { + return nil + } + + buf, err := marshalMetrics(md) + if err != nil { + return err + } + s.logger.Info(string(buf)) + return nil +} + +// MarshalMetrics pmetric.Metrics to OTLP text. +func marshalMetrics(md pmetric.Metrics) ([]byte, error) { + buf := dataBuffer{} + rms := md.ResourceMetrics() + for i := 0; i < rms.Len(); i++ { + buf.logEntry("ResourceMetrics #%d", i) + rm := rms.At(i) + buf.logEntry("Resource SchemaURL: %s", rm.SchemaUrl()) + buf.logAttributes("Resource attributes", rm.Resource().Attributes()) + ilms := rm.ScopeMetrics() + for j := 0; j < ilms.Len(); j++ { + buf.logEntry("ScopeMetrics #%d", j) + ilm := ilms.At(j) + buf.logEntry("ScopeMetrics SchemaURL: %s", ilm.SchemaUrl()) + buf.logInstrumentationScope(ilm.Scope()) + metrics := ilm.Metrics() + for k := 0; k < metrics.Len(); k++ { + buf.logEntry("Metric #%d", k) + metric := metrics.At(k) + buf.logMetricDescriptor(metric) + buf.logMetricDataPoints(metric) + } + } + } + + return buf.buf.Bytes(), nil +} + +type dataBuffer struct { + buf bytes.Buffer +} + +func (b *dataBuffer) logEntry(format string, a ...any) { + b.buf.WriteString(fmt.Sprintf(format, a...)) + b.buf.WriteString("\n") +} + +func (b *dataBuffer) logAttributes(header string, m pcommon.Map) { + if m.Len() == 0 { + return + } + + b.logEntry("%s:", header) + attrPrefix := " ->" + + // Add offset to attributes if needed. 
+ headerParts := strings.Split(header, "->") + if len(headerParts) > 1 { + attrPrefix = headerParts[0] + attrPrefix + } + + m.Range(func(k string, v pcommon.Value) bool { + b.logEntry("%s %s: %s", attrPrefix, k, valueToString(v)) + return true + }) +} + +func (b *dataBuffer) logInstrumentationScope(il pcommon.InstrumentationScope) { + b.logEntry( + "InstrumentationScope %s %s", + il.Name(), + il.Version()) + b.logAttributes("InstrumentationScope attributes", il.Attributes()) +} + +func (b *dataBuffer) logMetricDescriptor(md pmetric.Metric) { + b.logEntry("Descriptor:") + b.logEntry(" -> Name: %s", md.Name()) + b.logEntry(" -> Description: %s", md.Description()) + b.logEntry(" -> Unit: %s", md.Unit()) + b.logEntry(" -> DataType: %s", md.Type().String()) +} + +func (b *dataBuffer) logMetricDataPoints(m pmetric.Metric) { + switch m.Type() { + case pmetric.MetricTypeEmpty: + return + case pmetric.MetricTypeGauge: + b.logNumberDataPoints(m.Gauge().DataPoints()) + case pmetric.MetricTypeSum: + data := m.Sum() + b.logEntry(" -> IsMonotonic: %t", data.IsMonotonic()) + b.logEntry(" -> AggregationTemporality: %s", data.AggregationTemporality().String()) + b.logNumberDataPoints(data.DataPoints()) + case pmetric.MetricTypeHistogram: + data := m.Histogram() + b.logEntry(" -> AggregationTemporality: %s", data.AggregationTemporality().String()) + b.logHistogramDataPoints(data.DataPoints()) + case pmetric.MetricTypeExponentialHistogram: + data := m.ExponentialHistogram() + b.logEntry(" -> AggregationTemporality: %s", data.AggregationTemporality().String()) + b.logExponentialHistogramDataPoints(data.DataPoints()) + case pmetric.MetricTypeSummary: + data := m.Summary() + b.logDoubleSummaryDataPoints(data.DataPoints()) + } +} + +func (b *dataBuffer) logNumberDataPoints(ps pmetric.NumberDataPointSlice) { + for i := 0; i < ps.Len(); i++ { + p := ps.At(i) + b.logEntry("NumberDataPoints #%d", i) + b.logDataPointAttributes(p.Attributes()) + + b.logEntry("StartTimestamp: %s", p.StartTimestamp()) + b.logEntry("Timestamp: %s", p.Timestamp()) + switch p.ValueType() { + case pmetric.NumberDataPointValueTypeInt: + b.logEntry("Value: %d", p.IntValue()) + case pmetric.NumberDataPointValueTypeDouble: + b.logEntry("Value: %f", p.DoubleValue()) + case pmetric.NumberDataPointValueTypeEmpty: + b.logEntry("Value: Empty") + } + + b.logExemplars("Exemplars", p.Exemplars()) + } +} + +func (b *dataBuffer) logHistogramDataPoints(ps pmetric.HistogramDataPointSlice) { + for i := 0; i < ps.Len(); i++ { + p := ps.At(i) + b.logEntry("HistogramDataPoints #%d", i) + b.logDataPointAttributes(p.Attributes()) + + b.logEntry("StartTimestamp: %s", p.StartTimestamp()) + b.logEntry("Timestamp: %s", p.Timestamp()) + b.logEntry("Count: %d", p.Count()) + + if p.HasSum() { + b.logEntry("Sum: %f", p.Sum()) + } + + if p.HasMin() { + b.logEntry("Min: %f", p.Min()) + } + + if p.HasMax() { + b.logEntry("Max: %f", p.Max()) + } + + for i := 0; i < p.ExplicitBounds().Len(); i++ { + b.logEntry("ExplicitBounds #%d: %f", i, p.ExplicitBounds().At(i)) + } + + for j := 0; j < p.BucketCounts().Len(); j++ { + b.logEntry("Buckets #%d, Count: %d", j, p.BucketCounts().At(j)) + } + + b.logExemplars("Exemplars", p.Exemplars()) + } +} + +func (b *dataBuffer) logExponentialHistogramDataPoints(ps pmetric.ExponentialHistogramDataPointSlice) { + for i := 0; i < ps.Len(); i++ { + p := ps.At(i) + b.logEntry("ExponentialHistogramDataPoints #%d", i) + b.logDataPointAttributes(p.Attributes()) + + b.logEntry("StartTimestamp: %s", p.StartTimestamp()) + b.logEntry("Timestamp: %s", 
p.Timestamp()) + b.logEntry("Count: %d", p.Count()) + + if p.HasSum() { + b.logEntry("Sum: %f", p.Sum()) + } + + if p.HasMin() { + b.logEntry("Min: %f", p.Min()) + } + + if p.HasMax() { + b.logEntry("Max: %f", p.Max()) + } + + scale := int(p.Scale()) + factor := math.Ldexp(math.Ln2, -scale) + // Note: the equation used here, which is + // math.Exp(index * factor) + // reports +Inf as the _lower_ boundary of the bucket nearest + // infinity, which is incorrect and can be addressed in various + // ways. The OTel-Go implementation of this histogram pending + // in https://github.com/open-telemetry/opentelemetry-go/pull/2393 + // uses a lookup table for the last finite boundary, which can be + // easily computed using `math/big` (for scales up to 20). + + negB := p.Negative().BucketCounts() + posB := p.Positive().BucketCounts() + + for i := 0; i < negB.Len(); i++ { + pos := negB.Len() - i - 1 + index := float64(p.Negative().Offset()) + float64(pos) + lower := math.Exp(index * factor) + upper := math.Exp((index + 1) * factor) + b.logEntry("Bucket [%f, %f), Count: %d", -upper, -lower, negB.At(pos)) + } + + if p.ZeroCount() != 0 { + b.logEntry("Bucket [0, 0], Count: %d", p.ZeroCount()) + } + + for pos := 0; pos < posB.Len(); pos++ { + index := float64(p.Positive().Offset()) + float64(pos) + lower := math.Exp(index * factor) + upper := math.Exp((index + 1) * factor) + b.logEntry("Bucket (%f, %f], Count: %d", lower, upper, posB.At(pos)) + } + + b.logExemplars("Exemplars", p.Exemplars()) + } +} + +func (b *dataBuffer) logDoubleSummaryDataPoints(ps pmetric.SummaryDataPointSlice) { + for i := 0; i < ps.Len(); i++ { + p := ps.At(i) + b.logEntry("SummaryDataPoints #%d", i) + b.logDataPointAttributes(p.Attributes()) + + b.logEntry("StartTimestamp: %s", p.StartTimestamp()) + b.logEntry("Timestamp: %s", p.Timestamp()) + b.logEntry("Count: %d", p.Count()) + b.logEntry("Sum: %f", p.Sum()) + + quantiles := p.QuantileValues() + for i := 0; i < quantiles.Len(); i++ { + quantile := quantiles.At(i) + b.logEntry("QuantileValue #%d: Quantile %f, Value %f", i, quantile.Quantile(), quantile.Value()) + } + } +} + +func (b *dataBuffer) logDataPointAttributes(attributes pcommon.Map) { + b.logAttributes("Data point attributes", attributes) +} + +func (b *dataBuffer) logExemplars(description string, se pmetric.ExemplarSlice) { + if se.Len() == 0 { + return + } + + b.logEntry("%s:", description) + + for i := 0; i < se.Len(); i++ { + e := se.At(i) + b.logEntry("Exemplar #%d", i) + b.logEntry(" -> Trace ID: %s", e.TraceID()) + b.logEntry(" -> Span ID: %s", e.SpanID()) + b.logEntry(" -> Timestamp: %s", e.Timestamp()) + switch e.ValueType() { + case pmetric.ExemplarValueTypeInt: + b.logEntry(" -> Value: %d", e.IntValue()) + case pmetric.ExemplarValueTypeDouble: + b.logEntry(" -> Value: %f", e.DoubleValue()) + case pmetric.ExemplarValueTypeEmpty: + b.logEntry(" -> Value: Empty") + } + b.logAttributes(" -> FilteredAttributes", e.FilteredAttributes()) + } +} + +func valueToString(v pcommon.Value) string { + return fmt.Sprintf("%s(%s)", v.Type().String(), v.AsString()) +} diff --git a/pkg/promotel/internal/host.go b/pkg/promotel/internal/host.go new file mode 100644 index 0000000000..7685f7f590 --- /dev/null +++ b/pkg/promotel/internal/host.go @@ -0,0 +1,17 @@ +package internal + +import "go.opentelemetry.io/collector/component" + +type nopHost struct{} + +func NewNopHost() component.Host { + return &nopHost{} +} + +func (nh *nopHost) GetFactory(component.Kind, component.Type) component.Factory { + return nil +} + +func (nh 
*nopHost) GetExtensions() map[component.ID]component.Component {
+ return nil
+}
diff --git a/pkg/promotel/internal/prometheus/pdatautil/hash.go b/pkg/promotel/internal/prometheus/pdatautil/hash.go
new file mode 100644
index 0000000000..066f071195
--- /dev/null
+++ b/pkg/promotel/internal/prometheus/pdatautil/hash.go
@@ -0,0 +1,204 @@
+package pdatautil
+
+import (
+ "encoding/binary"
+ "math"
+ "sort"
+ "sync"
+
+ "github.com/cespare/xxhash/v2"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+)
+
+var (
+ extraByte = []byte{'\xf3'}
+ keyPrefix = []byte{'\xf4'}
+ valEmpty = []byte{'\xf5'}
+ valBytesPrefix = []byte{'\xf6'}
+ valStrPrefix = []byte{'\xf7'}
+ valBoolTrue = []byte{'\xf8'}
+ valBoolFalse = []byte{'\xf9'}
+ valIntPrefix = []byte{'\xfa'}
+ valDoublePrefix = []byte{'\xfb'}
+ valMapPrefix = []byte{'\xfc'}
+ valMapSuffix = []byte{'\xfd'}
+ valSlicePrefix = []byte{'\xfe'}
+ valSliceSuffix = []byte{'\xff'}
+
+ emptyHash = [16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+)
+
+// HashOption is a function that sets an option on the hash calculation.
+type HashOption func(*hashWriter)
+
+// WithMap adds a map to the hash calculation.
+func WithMap(m pcommon.Map) HashOption {
+ return func(hw *hashWriter) {
+ hw.writeMapHash(m)
+ }
+}
+
+// WithValue adds a value to the hash calculation.
+func WithValue(v pcommon.Value) HashOption {
+ return func(hw *hashWriter) {
+ hw.writeValueHash(v)
+ }
+}
+
+// WithString adds a string to the hash calculation.
+func WithString(s string) HashOption {
+ return func(hw *hashWriter) {
+ hw.byteBuf = append(hw.byteBuf, valStrPrefix...)
+ hw.byteBuf = append(hw.byteBuf, s...)
+ }
+}
+
+type hashWriter struct {
+ byteBuf []byte
+ keysBuf []string
+}
+
+func newHashWriter() *hashWriter {
+ return &hashWriter{
+ byteBuf: make([]byte, 0, 512),
+ keysBuf: make([]string, 0, 16),
+ }
+}
+
+var hashWriterPool = &sync.Pool{
+ New: func() any { return newHashWriter() },
+}
+
+// Hash generates a hash for the provided options and returns the computed hash as a [16]byte.
+func Hash(opts ...HashOption) [16]byte {
+ if len(opts) == 0 {
+ return emptyHash
+ }
+
+ hw := hashWriterPool.Get().(*hashWriter)
+ defer hashWriterPool.Put(hw)
+ hw.byteBuf = hw.byteBuf[:0]
+
+ for _, o := range opts {
+ o(hw)
+ }
+
+ return hw.hashSum128()
+}
+
+// Hash64 generates a hash for the provided options and returns the computed hash as a uint64.
+func Hash64(opts ...HashOption) uint64 {
+ hash := Hash(opts...)
+ return xxhash.Sum64(hash[:])
+}
+
+// MapHash returns a hash for the provided map.
+// Maps with the same underlying key/value pairs in different order produce the same deterministic hash value.
+func MapHash(m pcommon.Map) [16]byte {
+ if m.Len() == 0 {
+ return emptyHash
+ }
+
+ hw := hashWriterPool.Get().(*hashWriter)
+ defer hashWriterPool.Put(hw)
+ hw.byteBuf = hw.byteBuf[:0]
+
+ hw.writeMapHash(m)
+
+ return hw.hashSum128()
+}
+
+// ValueHash returns a hash for the provided pcommon.Value.
+func ValueHash(v pcommon.Value) [16]byte {
+ hw := hashWriterPool.Get().(*hashWriter)
+ defer hashWriterPool.Put(hw)
+ hw.byteBuf = hw.byteBuf[:0]
+
+ hw.writeValueHash(v)
+
+ return hw.hashSum128()
+}
+
+func (hw *hashWriter) writeMapHash(m pcommon.Map) {
+ // For each recursive call into this function we want to preserve the previous buffer state
+ // while also adding new keys to the buffer. nextIndex is the index of the first new key
+ // added to the buffer for this call of the function.
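+ // For example, hashing {"a": 1, "m": {"b": 2}} first buffers keys "a" and "m";
+ // the recursive call for the nested map then buffers "b" after nextIndex,
+ // sorts and hashes only that tail, and truncates it away before returning.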
+ // This also works for the first non-recursive call of this function because the buffer is always empty
+ // on the first call due to it being cleared of any added keys at the end of the function.
+ nextIndex := len(hw.keysBuf)
+
+ m.Range(func(k string, _ pcommon.Value) bool {
+ hw.keysBuf = append(hw.keysBuf, k)
+ return true
+ })
+
+ // Get only the newly added keys from the buffer by slicing the buffer from nextIndex to the end
+ workingKeySet := hw.keysBuf[nextIndex:]
+
+ sort.Strings(workingKeySet)
+ for _, k := range workingKeySet {
+ v, _ := m.Get(k)
+ hw.byteBuf = append(hw.byteBuf, keyPrefix...)
+ hw.byteBuf = append(hw.byteBuf, k...)
+ hw.writeValueHash(v)
+ }
+
+ // Remove all keys that were added to the buffer during this call of the function
+ hw.keysBuf = hw.keysBuf[:nextIndex]
+}
+
+func (hw *hashWriter) writeValueHash(v pcommon.Value) {
+ switch v.Type() {
+ case pcommon.ValueTypeStr:
+ hw.writeString(v.Str())
+ case pcommon.ValueTypeBool:
+ if v.Bool() {
+ hw.byteBuf = append(hw.byteBuf, valBoolTrue...)
+ } else {
+ hw.byteBuf = append(hw.byteBuf, valBoolFalse...)
+ }
+ case pcommon.ValueTypeInt:
+ hw.byteBuf = append(hw.byteBuf, valIntPrefix...)
+ hw.byteBuf = binary.LittleEndian.AppendUint64(hw.byteBuf, uint64(v.Int())) // nolint
+ case pcommon.ValueTypeDouble:
+ hw.byteBuf = append(hw.byteBuf, valDoublePrefix...)
+ hw.byteBuf = binary.LittleEndian.AppendUint64(hw.byteBuf, math.Float64bits(v.Double()))
+ case pcommon.ValueTypeMap:
+ hw.byteBuf = append(hw.byteBuf, valMapPrefix...)
+ hw.writeMapHash(v.Map())
+ hw.byteBuf = append(hw.byteBuf, valMapSuffix...)
+ case pcommon.ValueTypeSlice:
+ sl := v.Slice()
+ hw.byteBuf = append(hw.byteBuf, valSlicePrefix...)
+ for i := 0; i < sl.Len(); i++ {
+ hw.writeValueHash(sl.At(i))
+ }
+ hw.byteBuf = append(hw.byteBuf, valSliceSuffix...)
+ case pcommon.ValueTypeBytes:
+ hw.byteBuf = append(hw.byteBuf, valBytesPrefix...)
+ hw.byteBuf = append(hw.byteBuf, v.Bytes().AsRaw()...)
+ case pcommon.ValueTypeEmpty:
+ hw.byteBuf = append(hw.byteBuf, valEmpty...)
+ }
+}
+
+func (hw *hashWriter) writeString(s string) {
+ hw.byteBuf = append(hw.byteBuf, valStrPrefix...)
+ hw.byteBuf = append(hw.byteBuf, s...)
+}
+
+// hashSum128 returns a [16]byte hash sum.
+func (hw *hashWriter) hashSum128() [16]byte {
+ r := [16]byte{}
+ res := r[:]
+
+ h := xxhash.Sum64(hw.byteBuf)
+ res = binary.LittleEndian.AppendUint64(res[:0], h)
+
+ // Append an extra byte to generate another part of the hash sum
+ hw.byteBuf = append(hw.byteBuf, extraByte...)
+ h = xxhash.Sum64(hw.byteBuf) + _ = binary.LittleEndian.AppendUint64(res[8:], h) + + return r +} diff --git a/pkg/promotel/internal/prometheus/pdatautil/hash_test.go b/pkg/promotel/internal/prometheus/pdatautil/hash_test.go new file mode 100644 index 0000000000..3481a87098 --- /dev/null +++ b/pkg/promotel/internal/prometheus/pdatautil/hash_test.go @@ -0,0 +1,389 @@ +package pdatautil + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +func TestMapHash(t *testing.T) { + tests := []struct { + name string + maps []pcommon.Map + equal bool + }{ + { + name: "different_maps", + maps: func() []pcommon.Map { + m := make([]pcommon.Map, 29) + for i := 0; i < len(m); i++ { + m[i] = pcommon.NewMap() + } + m[1].PutStr("k", "") + m[2].PutStr("k", "v") + m[3].PutStr("k1", "v1") + m[4].PutBool("k", false) + m[5].PutBool("k", true) + m[6].PutInt("k", 0) + m[7].PutInt("k", 1) + m[8].PutDouble("k", 0) + m[9].PutDouble("k", 1) + m[10].PutEmpty("k") + + m[11].PutStr("k1", "val") + m[11].PutStr("k2", "val") + m[12].PutStr("k1", "va") + m[12].PutStr("lk2", "val") + + m[13].PutEmptySlice("k") + m[14].PutEmptySlice("k").AppendEmpty() + m[15].PutEmptySlice("k").AppendEmpty().SetStr("") + m[16].PutEmptySlice("k").AppendEmpty().SetStr("v") + sl1 := m[17].PutEmptySlice("k") + sl1.AppendEmpty().SetStr("v1") + sl1.AppendEmpty().SetStr("v2") + sl2 := m[18].PutEmptySlice("k") + sl2.AppendEmpty().SetStr("v2") + sl2.AppendEmpty().SetStr("v1") + + m[19].PutEmptyBytes("k") + m[20].PutEmptyBytes("k").FromRaw([]byte{0}) + m[21].PutEmptyBytes("k").FromRaw([]byte{1}) + + m[22].PutEmptyMap("k") + m[23].PutEmptyMap("k").PutStr("k", "") + m[24].PutEmptyMap("k").PutBool("k", false) + m[25].PutEmptyMap("k").PutEmptyMap("") + m[26].PutEmptyMap("k").PutEmptyMap("k") + + m[27].PutStr("k1", "v1") + m[27].PutStr("k2", "v2") + m[28].PutEmptyMap("k0").PutStr("k1", "v1") + m[28].PutStr("k2", "v2") + + return m + }(), + equal: false, + }, + { + name: "empty_maps", + maps: []pcommon.Map{pcommon.NewMap(), pcommon.NewMap()}, + equal: true, + }, + { + name: "same_maps_different_order", + maps: func() []pcommon.Map { + m := []pcommon.Map{pcommon.NewMap(), pcommon.NewMap()} + m[0].PutStr("k1", "v1") + m[0].PutInt("k2", 1) + m[0].PutDouble("k3", 1) + m[0].PutBool("k4", true) + m[0].PutEmptyBytes("k5").FromRaw([]byte("abc")) + sl := m[0].PutEmptySlice("k6") + sl.AppendEmpty().SetStr("str") + sl.AppendEmpty().SetBool(true) + m0 := m[0].PutEmptyMap("k") + m0.PutInt("k1", 1) + m0.PutDouble("k2", 10) + + m1 := m[1].PutEmptyMap("k") + m1.PutDouble("k2", 10) + m1.PutInt("k1", 1) + m[1].PutEmptyBytes("k5").FromRaw([]byte("abc")) + m[1].PutBool("k4", true) + sl = m[1].PutEmptySlice("k6") + sl.AppendEmpty().SetStr("str") + sl.AppendEmpty().SetBool(true) + m[1].PutInt("k2", 1) + m[1].PutStr("k1", "v1") + m[1].PutDouble("k3", 1) + + return m + }(), + equal: true, + }, + { + // Specific test to ensure panic described in https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/18910 is fixed. 
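+ // The two maps below contain identical nested keys inserted in different
+ // orders, so their hashes must be equal and, per the issue above, the
+ // computation must not panic.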
+ name: "nested_maps_different_order", + maps: func() []pcommon.Map { + m := []pcommon.Map{pcommon.NewMap(), pcommon.NewMap()} + m[0].PutStr("k1", "v1") + m0 := m[0].PutEmptyMap("k2") + m[0].PutDouble("k3", 1) + m[0].PutBool("k4", true) + m0.PutInt("k21", 1) + m0.PutInt("k22", 1) + m0.PutInt("k23", 1) + + m1 := m[1].PutEmptyMap("k2") + m1.PutInt("k22", 1) + m1.PutInt("k21", 1) + m1.PutInt("k23", 1) + m[1].PutDouble("k3", 1) + m[1].PutStr("k1", "v1") + m[1].PutBool("k4", true) + + return m + }(), + equal: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + for i := 0; i < len(tt.maps); i++ { + for j := i + 1; j < len(tt.maps); j++ { + if tt.equal { + assert.Equal(t, MapHash(tt.maps[i]), MapHash(tt.maps[j]), + "maps %d %v and %d %v must have the same hash", i, tt.maps[i].AsRaw(), j, tt.maps[j].AsRaw()) + } else { + assert.NotEqual(t, MapHash(tt.maps[i]), MapHash(tt.maps[j]), + "maps %d %v and %d %v must have different hashes", i, tt.maps[i].AsRaw(), j, tt.maps[j].AsRaw()) + } + } + } + }) + } +} + +func TestValueHash(t *testing.T) { + tests := []struct { + name string + values []pcommon.Value + equal bool + }{ + { + name: "different_values", + values: func() []pcommon.Value { + m := make([]pcommon.Value, 21) + for i := 0; i < len(m); i++ { + m[i] = pcommon.NewValueEmpty() + } + m[1].SetStr("") + m[2].SetStr("v") + m[3].SetBool(false) + m[4].SetBool(true) + m[5].SetInt(0) + m[6].SetInt(1) + m[7].SetDouble(0) + m[8].SetDouble(1) + + m[9].SetEmptySlice() + m[10].SetEmptySlice().AppendEmpty() + m[11].SetEmptySlice().AppendEmpty().SetStr("") + m[12].SetEmptySlice().AppendEmpty().SetStr("v") + + m[13].SetEmptyBytes() + m[14].SetEmptyBytes().FromRaw([]byte{0}) + m[15].SetEmptyBytes().FromRaw([]byte{1}) + + m[16].SetEmptyMap() + m[17].SetEmptyMap().PutStr("k", "") + m[18].SetEmptyMap().PutBool("k", false) + m[19].SetEmptyMap().PutEmptyMap("") + m[20].SetEmptyMap().PutEmptyMap("k") + + return m + }(), + equal: false, + }, + { + name: "empty_values", + values: []pcommon.Value{pcommon.NewValueEmpty(), pcommon.NewValueEmpty()}, + equal: true, + }, + { + name: "empty_strings", + values: []pcommon.Value{pcommon.NewValueStr(""), pcommon.NewValueStr("")}, + equal: true, + }, + { + name: "strings", + values: []pcommon.Value{pcommon.NewValueStr("v"), pcommon.NewValueStr("v")}, + equal: true, + }, + { + name: "int", + values: []pcommon.Value{pcommon.NewValueInt(1), pcommon.NewValueInt(1)}, + equal: true, + }, + { + name: "double", + values: []pcommon.Value{pcommon.NewValueDouble(1), pcommon.NewValueDouble(1)}, + equal: true, + }, + { + name: "bool", + values: []pcommon.Value{pcommon.NewValueBool(true), pcommon.NewValueBool(true)}, + equal: true, + }, + { + name: "empty_bytes", + values: []pcommon.Value{pcommon.NewValueBytes(), pcommon.NewValueBytes()}, + equal: true, + }, + { + name: "bytes", + values: func() []pcommon.Value { + v1 := pcommon.NewValueBytes() + require.NoError(t, v1.FromRaw([]byte{0})) + v2 := pcommon.NewValueBytes() + require.NoError(t, v2.FromRaw([]byte{0})) + return []pcommon.Value{v1, v2} + }(), + equal: true, + }, + { + name: "empty_slices", + values: []pcommon.Value{pcommon.NewValueSlice(), pcommon.NewValueSlice()}, + equal: true, + }, + { + name: "slices_with_empty_items", + values: func() []pcommon.Value { + v1 := pcommon.NewValueSlice() + v1.Slice().AppendEmpty() + v2 := pcommon.NewValueSlice() + v2.Slice().AppendEmpty() + return []pcommon.Value{v1, v2} + }(), + equal: true, + }, + { + name: "slices", + values: func() []pcommon.Value { + v1 := 
pcommon.NewValueSlice() + v1.Slice().AppendEmpty().SetStr("v") + v2 := pcommon.NewValueSlice() + v2.Slice().AppendEmpty().SetStr("v") + return []pcommon.Value{v1, v2} + }(), + equal: true, + }, + { + name: "empty_maps", + values: []pcommon.Value{pcommon.NewValueMap(), pcommon.NewValueMap()}, + equal: true, + }, + { + name: "maps", + values: func() []pcommon.Value { + v1 := pcommon.NewValueMap() + v1.Map().PutStr("k1", "v") + v1.Map().PutInt("k2", 0) + v2 := pcommon.NewValueMap() + v2.Map().PutInt("k2", 0) + v2.Map().PutStr("k1", "v") + return []pcommon.Value{v1, v2} + }(), + equal: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + for i := 0; i < len(tt.values); i++ { + for j := i + 1; j < len(tt.values); j++ { + if tt.equal { + assert.Equal(t, ValueHash(tt.values[i]), ValueHash(tt.values[j]), + "values %d %v and %d %v must have the same hash", i, tt.values[i].AsRaw(), j, tt.values[j].AsRaw()) + } else { + assert.NotEqual(t, ValueHash(tt.values[i]), ValueHash(tt.values[j]), + "values %d %v and %d %v must have different hashes", i, tt.values[i].AsRaw(), j, tt.values[j].AsRaw()) + } + } + } + }) + } +} + +func TestMapValueHashNotEqual(t *testing.T) { + tests := []struct { + name string + m pcommon.Map + v pcommon.Value + }{ + { + name: "empty", + v: pcommon.NewValueMap(), + m: pcommon.NewMap(), + }, + { + name: "not_empty", + v: func() pcommon.Value { + v := pcommon.NewValueMap() + v.Map().PutStr("k", "v") + return v + }(), + m: func() pcommon.Map { + m := pcommon.NewMap() + m.PutStr("k", "v") + return m + }(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.NotEqual(t, ValueHash(tt.v), MapHash(tt.m), + "value %v and map %v must have different hashes", tt.v.AsRaw(), tt.m.AsRaw()) + }) + } +} + +func BenchmarkMapHashFourItems(b *testing.B) { + m := pcommon.NewMap() + m.PutStr("test-string-key2", "test-value-2") + m.PutStr("test-string-key1", "test-value-1") + m.PutInt("test-int-key", 123) + m.PutBool("test-bool-key", true) + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + MapHash(m) + } +} + +func BenchmarkMapHashEightItems(b *testing.B) { + m := pcommon.NewMap() + m.PutStr("test-string-key2", "test-value-2") + m.PutStr("test-string-key1", "test-value-1") + m.PutInt("test-int-key", 123) + m.PutBool("test-bool-key", true) + m.PutStr("test-string-key3", "test-value-3") + m.PutDouble("test-double-key2", 22.123) + m.PutDouble("test-double-key1", 11.123) + m.PutEmptyBytes("test-bytes-key").FromRaw([]byte("abc")) + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + MapHash(m) + } +} + +func BenchmarkMapHashWithEmbeddedSliceAndMap(b *testing.B) { + m := pcommon.NewMap() + m.PutStr("test-string-key2", "test-value-2") + m.PutStr("test-string-key1", "test-value-1") + m.PutInt("test-int-key", 123) + m.PutBool("test-bool-key", true) + m.PutStr("test-string-key3", "test-value-3") + m.PutDouble("test-double-key2", 22.123) + m.PutDouble("test-double-key1", 11.123) + m.PutEmptyBytes("test-bytes-key").FromRaw([]byte("abc")) + m1 := m.PutEmptyMap("test-map-key") + m1.PutStr("test-embedded-string-key", "test-embedded-string-value") + m1.PutDouble("test-embedded-double-key", 22.123) + m1.PutInt("test-embedded-int-key", 234) + sl := m.PutEmptySlice("test-slice-key") + sl.AppendEmpty().SetStr("test-slice-string-1") + sl.AppendEmpty().SetStr("test-slice-string-2") + sl.AppendEmpty().SetStr("test-slice-string-3") + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + MapHash(m) + } +} diff --git 
a/pkg/promotel/internal/prometheus/pdatautil/metadata.yaml b/pkg/promotel/internal/prometheus/pdatautil/metadata.yaml new file mode 100644 index 0000000000..f726a58cdf --- /dev/null +++ b/pkg/promotel/internal/prometheus/pdatautil/metadata.yaml @@ -0,0 +1,3 @@ +status: + codeowners: + active: [dmitryax] \ No newline at end of file diff --git a/pkg/promotel/internal/prometheus/pdatautil/package_test.go b/pkg/promotel/internal/prometheus/pdatautil/package_test.go new file mode 100644 index 0000000000..3d5f2523d3 --- /dev/null +++ b/pkg/promotel/internal/prometheus/pdatautil/package_test.go @@ -0,0 +1,11 @@ +package pdatautil + +import ( + "testing" + + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} diff --git a/pkg/promotel/internal/prometheus/scrape/clientprotobuf.go b/pkg/promotel/internal/prometheus/scrape/clientprotobuf.go new file mode 100644 index 0000000000..88c547e608 --- /dev/null +++ b/pkg/promotel/internal/prometheus/scrape/clientprotobuf.go @@ -0,0 +1,41 @@ +package scrape + +import ( + "bytes" + "encoding/binary" + + "github.com/gogo/protobuf/proto" + + // Intentionally using client model to simulate client in tests. + dto "github.com/prometheus/client_model/go" +) + +// Write a MetricFamily into a protobuf. +// This function is intended for testing scraping by providing protobuf serialized input. +func MetricFamilyToProtobuf(metricFamily *dto.MetricFamily) ([]byte, error) { + buffer := &bytes.Buffer{} + err := AddMetricFamilyToProtobuf(buffer, metricFamily) + if err != nil { + return nil, err + } + return buffer.Bytes(), nil +} + +// Append a MetricFamily protobuf representation to a buffer. +// This function is intended for testing scraping by providing protobuf serialized input. +func AddMetricFamilyToProtobuf(buffer *bytes.Buffer, metricFamily *dto.MetricFamily) error { + protoBuf, err := proto.Marshal(metricFamily) + if err != nil { + return err + } + + varintBuf := make([]byte, binary.MaxVarintLen32) + varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf))) + + _, err = buffer.Write(varintBuf[:varintLength]) + if err != nil { + return err + } + _, err = buffer.Write(protoBuf) + return err +} diff --git a/pkg/promotel/internal/prometheus/scrape/helpers_test.go b/pkg/promotel/internal/prometheus/scrape/helpers_test.go new file mode 100644 index 0000000000..6c59e538c6 --- /dev/null +++ b/pkg/promotel/internal/prometheus/scrape/helpers_test.go @@ -0,0 +1,182 @@ +package scrape + +import ( + "fmt" + "math" + "math/rand" + "strings" + "sync" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/storage" +) + +type nopAppender struct{} + +func (a nopAppender) Append(storage.SeriesRef, labels.Labels, int64, float64) (storage.SeriesRef, error) { + return 0, nil +} + +func (a nopAppender) AppendExemplar(storage.SeriesRef, labels.Labels, exemplar.Exemplar) (storage.SeriesRef, error) { + return 0, nil +} + +func (a nopAppender) AppendHistogram(storage.SeriesRef, labels.Labels, int64, *histogram.Histogram, *histogram.FloatHistogram) (storage.SeriesRef, error) { + return 0, nil +} + +func (a nopAppender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) { + return 0, nil +} + +func (a nopAppender) AppendCTZeroSample(storage.SeriesRef, labels.Labels, int64, int64) (storage.SeriesRef, error) { + return 0, 
nil +} + +func (a nopAppender) Commit() error { return nil } +func (a nopAppender) Rollback() error { return nil } + +type floatSample struct { + metric labels.Labels + t int64 + f float64 +} + +func equalFloatSamples(a, b floatSample) bool { + // Compare Float64bits so NaN values which are exactly the same will compare equal. + return labels.Equal(a.metric, b.metric) && a.t == b.t && math.Float64bits(a.f) == math.Float64bits(b.f) +} + +type histogramSample struct { + t int64 + h *histogram.Histogram + fh *histogram.FloatHistogram +} + +// collectResultAppender records all samples that were added through the appender. +// It can be used as its zero value or be backed by another appender it writes samples through. +type collectResultAppender struct { + mtx sync.Mutex + + next storage.Appender + resultFloats []floatSample + pendingFloats []floatSample + rolledbackFloats []floatSample + resultHistograms []histogramSample + pendingHistograms []histogramSample + rolledbackHistograms []histogramSample + resultExemplars []exemplar.Exemplar + pendingExemplars []exemplar.Exemplar + resultMetadata []metadata.Metadata + pendingMetadata []metadata.Metadata +} + +func (a *collectResultAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { + a.mtx.Lock() + defer a.mtx.Unlock() + a.pendingFloats = append(a.pendingFloats, floatSample{ + metric: lset, + t: t, + f: v, + }) + + if ref == 0 { + ref = storage.SeriesRef(rand.Uint64()) + } + if a.next == nil { + return ref, nil + } + + ref, err := a.next.Append(ref, lset, t, v) + if err != nil { + return 0, err + } + return ref, err +} + +func (a *collectResultAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { + a.mtx.Lock() + defer a.mtx.Unlock() + a.pendingExemplars = append(a.pendingExemplars, e) + if a.next == nil { + return 0, nil + } + + return a.next.AppendExemplar(ref, l, e) +} + +func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + a.mtx.Lock() + defer a.mtx.Unlock() + a.pendingHistograms = append(a.pendingHistograms, histogramSample{h: h, fh: fh, t: t}) + if a.next == nil { + return 0, nil + } + + return a.next.AppendHistogram(ref, l, t, h, fh) +} + +func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { + a.mtx.Lock() + defer a.mtx.Unlock() + a.pendingMetadata = append(a.pendingMetadata, m) + if ref == 0 { + ref = storage.SeriesRef(rand.Uint64()) + } + if a.next == nil { + return ref, nil + } + + return a.next.UpdateMetadata(ref, l, m) +} + +func (a *collectResultAppender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) { + return a.Append(ref, l, ct, 0.0) +} + +func (a *collectResultAppender) Commit() error { + a.mtx.Lock() + defer a.mtx.Unlock() + a.resultFloats = append(a.resultFloats, a.pendingFloats...) + a.resultExemplars = append(a.resultExemplars, a.pendingExemplars...) + a.resultHistograms = append(a.resultHistograms, a.pendingHistograms...) + a.resultMetadata = append(a.resultMetadata, a.pendingMetadata...) 
+ a.pendingFloats = nil + a.pendingExemplars = nil + a.pendingHistograms = nil + a.pendingMetadata = nil + if a.next == nil { + return nil + } + return a.next.Commit() +} + +func (a *collectResultAppender) Rollback() error { + a.mtx.Lock() + defer a.mtx.Unlock() + a.rolledbackFloats = a.pendingFloats + a.rolledbackHistograms = a.pendingHistograms + a.pendingFloats = nil + a.pendingHistograms = nil + if a.next == nil { + return nil + } + return a.next.Rollback() +} + +func (a *collectResultAppender) String() string { + var sb strings.Builder + for _, s := range a.resultFloats { + sb.WriteString(fmt.Sprintf("committed: %s %f %d\n", s.metric, s.f, s.t)) + } + for _, s := range a.pendingFloats { + sb.WriteString(fmt.Sprintf("pending: %s %f %d\n", s.metric, s.f, s.t)) + } + for _, s := range a.rolledbackFloats { + sb.WriteString(fmt.Sprintf("rolledback: %s %f %d\n", s.metric, s.f, s.t)) + } + return sb.String() +} diff --git a/pkg/promotel/internal/prometheus/scrape/manager.go b/pkg/promotel/internal/prometheus/scrape/manager.go new file mode 100644 index 0000000000..8ebd4ad816 --- /dev/null +++ b/pkg/promotel/internal/prometheus/scrape/manager.go @@ -0,0 +1,30 @@ +package scrape + +import ( + config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/model" +) + +// Options are the configuration parameters to the scrape manager. +type Options struct { + ExtraMetrics bool + NoDefaultPort bool + // Option used by downstream scraper users like OpenTelemetry Collector + // to help lookup metric metadata. Should be false for Prometheus. + PassMetadataInContext bool + // Option to enable appending of scraped Metadata to the TSDB/other appenders. Individual appenders + // can decide what to do with metadata, but for practical purposes this flag exists so that metadata + // can be written to the WAL and thus read for remote write. + // TODO: implement some form of metadata storage + AppendMetadata bool + // Option to increase the interval used by scrape manager to throttle target groups updates. + DiscoveryReloadInterval model.Duration + // Option to enable the ingestion of the created timestamp as a synthetic zero sample. + // See: https://github.com/prometheus/proposals/blob/main/proposals/2023-06-13_created-timestamp.md + EnableCreatedTimestampZeroIngestion bool + // Option to enable the ingestion of native histograms. + EnableNativeHistogramsIngestion bool + + // Optional HTTP client options to use when scraping. + HTTPClientOptions []config_util.HTTPClientOption +} diff --git a/pkg/promotel/internal/prometheus/scrape/metrics.go b/pkg/promotel/internal/prometheus/scrape/metrics.go new file mode 100644 index 0000000000..ff3a972ba4 --- /dev/null +++ b/pkg/promotel/internal/prometheus/scrape/metrics.go @@ -0,0 +1,327 @@ +package scrape + +import ( + "fmt" + + "github.com/prometheus/client_golang/prometheus" +) + +type scrapeMetrics struct { + reg prometheus.Registerer + // Used by Manager. + targetMetadataCache *MetadataMetricsCollector + targetScrapePools prometheus.Counter + targetScrapePoolsFailed prometheus.Counter + + // Used by scrapePool. 
+ targetReloadIntervalLength *prometheus.SummaryVec + targetScrapePoolReloads prometheus.Counter + targetScrapePoolReloadsFailed prometheus.Counter + targetScrapePoolSyncsCounter *prometheus.CounterVec + targetScrapePoolExceededTargetLimit prometheus.Counter + targetScrapePoolTargetLimit *prometheus.GaugeVec + targetScrapePoolTargetsAdded *prometheus.GaugeVec + targetScrapePoolSymbolTableItems *prometheus.GaugeVec + targetSyncIntervalLength *prometheus.SummaryVec + targetSyncFailed *prometheus.CounterVec + + // Used by targetScraper. + targetScrapeExceededBodySizeLimit prometheus.Counter + + // Used by scrapeCache. + targetScrapeCacheFlushForced prometheus.Counter + + // Used by scrapeLoop. + targetIntervalLength *prometheus.SummaryVec + targetScrapeSampleLimit prometheus.Counter + targetScrapeSampleDuplicate prometheus.Counter + targetScrapeSampleOutOfOrder prometheus.Counter + targetScrapeSampleOutOfBounds prometheus.Counter + targetScrapeExemplarOutOfOrder prometheus.Counter + targetScrapePoolExceededLabelLimits prometheus.Counter + targetScrapeNativeHistogramBucketLimit prometheus.Counter +} + +func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) { + sm := &scrapeMetrics{reg: reg} + + // Manager metrics. + sm.targetMetadataCache = &MetadataMetricsCollector{ + CacheEntries: prometheus.NewDesc( + "prometheus_target_metadata_cache_entries", + "Total number of metric metadata entries in the cache", + []string{"scrape_job"}, + nil, + ), + CacheBytes: prometheus.NewDesc( + "prometheus_target_metadata_cache_bytes", + "The number of bytes that are currently used for storing metric metadata in the cache", + []string{"scrape_job"}, + nil, + ), + // TargetsGatherer should be set later, because it's a circular dependency. + // newScrapeMetrics() is called by NewManager(), while also TargetsGatherer is the new Manager. + } + + sm.targetScrapePools = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrape_pools_total", + Help: "Total number of scrape pool creation attempts.", + }, + ) + sm.targetScrapePoolsFailed = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrape_pools_failed_total", + Help: "Total number of scrape pool creations that failed.", + }, + ) + + // Used by scrapePool. 
+ sm.targetReloadIntervalLength = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "prometheus_target_reload_length_seconds", + Help: "Actual interval to reload the scrape pool with a given configuration.", + Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001}, + }, + []string{"interval"}, + ) + sm.targetScrapePoolReloads = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrape_pool_reloads_total", + Help: "Total number of scrape pool reloads.", + }, + ) + sm.targetScrapePoolReloadsFailed = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrape_pool_reloads_failed_total", + Help: "Total number of failed scrape pool reloads.", + }, + ) + sm.targetScrapePoolExceededTargetLimit = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrape_pool_exceeded_target_limit_total", + Help: "Total number of times scrape pools hit the target limit, during sync or config reload.", + }, + ) + sm.targetScrapePoolTargetLimit = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "prometheus_target_scrape_pool_target_limit", + Help: "Maximum number of targets allowed in this scrape pool.", + }, + []string{"scrape_job"}, + ) + sm.targetScrapePoolTargetsAdded = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "prometheus_target_scrape_pool_targets", + Help: "Current number of targets in this scrape pool.", + }, + []string{"scrape_job"}, + ) + sm.targetScrapePoolSymbolTableItems = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "prometheus_target_scrape_pool_symboltable_items", + Help: "Current number of symbols in table for this scrape pool.", + }, + []string{"scrape_job"}, + ) + sm.targetScrapePoolSyncsCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "prometheus_target_scrape_pool_sync_total", + Help: "Total number of syncs that were executed on a scrape pool.", + }, + []string{"scrape_job"}, + ) + sm.targetSyncIntervalLength = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "prometheus_target_sync_length_seconds", + Help: "Actual interval to sync the scrape pool.", + Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001}, + }, + []string{"scrape_job"}, + ) + sm.targetSyncFailed = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "prometheus_target_sync_failed_total", + Help: "Total number of target sync failures.", + }, + []string{"scrape_job"}, + ) + + // Used by targetScraper. + sm.targetScrapeExceededBodySizeLimit = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrapes_exceeded_body_size_limit_total", + Help: "Total number of scrapes that hit the body size limit", + }, + ) + + // Used by scrapeCache. + sm.targetScrapeCacheFlushForced = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrapes_cache_flush_forced_total", + Help: "How many times a scrape cache was flushed due to getting big while scrapes are failing.", + }, + ) + + // Used by scrapeLoop. 
+ sm.targetIntervalLength = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "prometheus_target_interval_length_seconds", + Help: "Actual intervals between scrapes.", + Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001}, + }, + []string{"interval"}, + ) + sm.targetScrapeSampleLimit = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrapes_exceeded_sample_limit_total", + Help: "Total number of scrapes that hit the sample limit and were rejected.", + }, + ) + sm.targetScrapeSampleDuplicate = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrapes_sample_duplicate_timestamp_total", + Help: "Total number of samples rejected due to duplicate timestamps but different values.", + }, + ) + sm.targetScrapeSampleOutOfOrder = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrapes_sample_out_of_order_total", + Help: "Total number of samples rejected due to not being out of the expected order.", + }, + ) + sm.targetScrapeSampleOutOfBounds = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrapes_sample_out_of_bounds_total", + Help: "Total number of samples rejected due to timestamp falling outside of the time bounds.", + }, + ) + sm.targetScrapePoolExceededLabelLimits = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrape_pool_exceeded_label_limits_total", + Help: "Total number of times scrape pools hit the label limits, during sync or config reload.", + }, + ) + sm.targetScrapeNativeHistogramBucketLimit = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrapes_exceeded_native_histogram_bucket_limit_total", + Help: "Total number of scrapes that hit the native histogram bucket limit and were rejected.", + }, + ) + sm.targetScrapeExemplarOutOfOrder = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrapes_exemplar_out_of_order_total", + Help: "Total number of exemplar rejected due to not being out of the expected order.", + }, + ) + + for _, collector := range []prometheus.Collector{ + // Used by Manager. + sm.targetMetadataCache, + sm.targetScrapePools, + sm.targetScrapePoolsFailed, + // Used by scrapePool. + sm.targetReloadIntervalLength, + sm.targetScrapePoolReloads, + sm.targetScrapePoolReloadsFailed, + sm.targetSyncIntervalLength, + sm.targetScrapePoolSyncsCounter, + sm.targetScrapePoolExceededTargetLimit, + sm.targetScrapePoolTargetLimit, + sm.targetScrapePoolTargetsAdded, + sm.targetScrapePoolSymbolTableItems, + sm.targetSyncFailed, + // Used by targetScraper. + sm.targetScrapeExceededBodySizeLimit, + // Used by scrapeCache. + sm.targetScrapeCacheFlushForced, + // Used by scrapeLoop. + sm.targetIntervalLength, + sm.targetScrapeSampleLimit, + sm.targetScrapeSampleDuplicate, + sm.targetScrapeSampleOutOfOrder, + sm.targetScrapeSampleOutOfBounds, + sm.targetScrapeExemplarOutOfOrder, + sm.targetScrapePoolExceededLabelLimits, + sm.targetScrapeNativeHistogramBucketLimit, + } { + err := reg.Register(collector) + if err != nil { + return nil, fmt.Errorf("failed to register scrape metrics: %w", err) + } + } + return sm, nil +} + +// Unregister unregisters all metrics. 
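+// Unregistering a collector that is no longer (or was never) registered is
+// harmless: the underlying Registerer simply reports false, so this is safe
+// to call once during shutdown.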
+func (sm *scrapeMetrics) Unregister() { + sm.reg.Unregister(sm.targetMetadataCache) + sm.reg.Unregister(sm.targetScrapePools) + sm.reg.Unregister(sm.targetScrapePoolsFailed) + sm.reg.Unregister(sm.targetReloadIntervalLength) + sm.reg.Unregister(sm.targetScrapePoolReloads) + sm.reg.Unregister(sm.targetScrapePoolReloadsFailed) + sm.reg.Unregister(sm.targetSyncIntervalLength) + sm.reg.Unregister(sm.targetScrapePoolSyncsCounter) + sm.reg.Unregister(sm.targetScrapePoolExceededTargetLimit) + sm.reg.Unregister(sm.targetScrapePoolTargetLimit) + sm.reg.Unregister(sm.targetScrapePoolTargetsAdded) + sm.reg.Unregister(sm.targetScrapePoolSymbolTableItems) + sm.reg.Unregister(sm.targetSyncFailed) + sm.reg.Unregister(sm.targetScrapeExceededBodySizeLimit) + sm.reg.Unregister(sm.targetScrapeCacheFlushForced) + sm.reg.Unregister(sm.targetIntervalLength) + sm.reg.Unregister(sm.targetScrapeSampleLimit) + sm.reg.Unregister(sm.targetScrapeSampleDuplicate) + sm.reg.Unregister(sm.targetScrapeSampleOutOfOrder) + sm.reg.Unregister(sm.targetScrapeSampleOutOfBounds) + sm.reg.Unregister(sm.targetScrapeExemplarOutOfOrder) + sm.reg.Unregister(sm.targetScrapePoolExceededLabelLimits) + sm.reg.Unregister(sm.targetScrapeNativeHistogramBucketLimit) +} + +type TargetsGatherer interface { + TargetsActive() map[string][]*Target +} + +// MetadataMetricsCollector is a Custom Collector for the metadata cache metrics. +type MetadataMetricsCollector struct { + CacheEntries *prometheus.Desc + CacheBytes *prometheus.Desc + TargetsGatherer TargetsGatherer +} + +// Describe sends the metrics descriptions to the channel. +func (mc *MetadataMetricsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- mc.CacheEntries + ch <- mc.CacheBytes +} + +// Collect creates and sends the metrics for the metadata cache. 
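+// It is a no-op until TargetsGatherer is wired in after construction (see the
+// circular-dependency note in newScrapeMetrics).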
+func (mc *MetadataMetricsCollector) Collect(ch chan<- prometheus.Metric) {
+ if mc.TargetsGatherer == nil {
+ return
+ }
+
+ for tset, targets := range mc.TargetsGatherer.TargetsActive() {
+ var size, length int
+ for _, t := range targets {
+ size += t.SizeMetadata()
+ length += t.LengthMetadata()
+ }
+
+ ch <- prometheus.MustNewConstMetric(
+ mc.CacheEntries,
+ prometheus.GaugeValue,
+ float64(length),
+ tset,
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ mc.CacheBytes,
+ prometheus.GaugeValue,
+ float64(size),
+ tset,
+ )
+ }
+}
diff --git a/pkg/promotel/internal/prometheus/scrape/promotel.go b/pkg/promotel/internal/prometheus/scrape/promotel.go
new file mode 100644
index 0000000000..329ee2e77e
--- /dev/null
+++ b/pkg/promotel/internal/prometheus/scrape/promotel.go
@@ -0,0 +1,148 @@
+package scrape
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "time"
+
+ "github.com/go-kit/log"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/config"
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/textparse"
+ "github.com/prometheus/prometheus/storage"
+
+ internaltextparse "github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheus/textparse"
+)
+
+// GathererLoop is a scrapeLoop that reads samples from a prometheus.Gatherer
+// instead of scraping an HTTP target.
+type GathererLoop struct {
+ *scrapeLoop
+ g prometheus.Gatherer
+}
+
+func (gl *GathererLoop) newParser() (textparse.Parser, error) {
+ mfs, err := gl.g.Gather()
+ if err != nil {
+ _ = gl.l.Log("msg", "Error while gathering metrics", "err", err)
+ return nil, err
+ }
+ return internaltextparse.NewProtobufParserShim(gl.scrapeClassicHistograms, gl.symbolTable, mfs), err
+}
+
+func (gl *GathererLoop) Run(errc chan<- error) {
+ gl.scrapeLoop.run(errc)
+}
+
+func (gl *GathererLoop) Stop() {
+ gl.scrapeLoop.stop()
+}
+
+// UnregisterMetrics unregisters the loop's scrape metrics from the Registerer
+// they were created with.
+func (gl *GathererLoop) UnregisterMetrics() {
+ if gl.scrapeLoop.metrics != nil {
+ gl.scrapeLoop.metrics.Unregister()
+ }
+}
+
+func (gl *GathererLoop) ScrapeAndReport(
+ last, appendTime time.Time, errc chan<- error,
+) time.Time {
+ return gl.scrapeAndReport(last, appendTime, errc)
+}
+
+func noopScrapeFunc(context.Context, io.Writer) error { return nil }
+
+func newNoopTarget(lbls labels.Labels) *Target {
+ return &Target{labels: lbls}
+}
+
+func NewGathererLoop(ctx context.Context, logger log.Logger, app storage.Appendable, reg prometheus.Registerer, g prometheus.Gatherer, interval time.Duration) (*GathererLoop, error) {
+ nopMutator := func(l labels.Labels) labels.Labels { return l }
+ metrics, err := newScrapeMetrics(reg)
+ if err != nil {
+ return nil, err
+ }
+ if logger == nil {
+ logger = log.NewNopLogger()
+ }
+ target := newNoopTarget([]labels.Label{
+ {Name: model.JobLabel, Value: "promotel"}, // required label
+ {Name: model.InstanceLabel, Value: "promotel"}, // required label
+ {Name: model.ScrapeIntervalLabel, Value: interval.String()},
+ {Name: model.MetricsPathLabel, Value: config.DefaultScrapeConfig.MetricsPath},
+ {Name: model.SchemeLabel, Value: config.DefaultScrapeConfig.Scheme},
+ })
+ loop := &GathererLoop{
+ newScrapeLoop(
+ ctx,
+ &scraperShim{scrapeFunc: noopScrapeFunc},
+ logger,
+ nil,
+ nopMutator,
+ nopMutator,
+ func(ctx context.Context) storage.Appender { return app.Appender(ctx) },
+ nil,
+ labels.NewSymbolTable(),
+ 0,
+ true,
+ false,
+ true,
+ 0,
+ 0,
+ histogram.ExponentialSchemaMax,
+ nil,
+ interval,
+ time.Hour,
+ false,
+ false,
+ false,
+ false, // todo: pass this from the opts
+ false,
+ target,
+ true,
+ metrics,
+ true,
+ ),
+ g,
+ }
+ // Override the newParser function to use the gatherer.
+ loop.scrapeLoop.newParserFunc = loop.newParser
+ return loop, nil
+}
+
+// scraperShim implements the scraper interface and allows setting values
+// returned by its methods. It also allows setting a custom scrape function.
+type scraperShim struct {
+ offsetDur time.Duration
+
+ lastStart time.Time
+ lastDuration time.Duration
+ lastError error
+
+ scrapeErr error
+ scrapeFunc func(context.Context, io.Writer) error
+}
+
+func (ts *scraperShim) offset(time.Duration, uint64) time.Duration {
+ return ts.offsetDur
+}
+
+func (ts *scraperShim) Report(start time.Time, duration time.Duration, err error) {
+ ts.lastStart = start
+ ts.lastDuration = duration
+ ts.lastError = err
+}
+
+func (ts *scraperShim) scrape(ctx context.Context) (*http.Response, error) {
+ return nil, ts.scrapeErr
+}
+
+func (ts *scraperShim) readResponse(ctx context.Context, resp *http.Response, w io.Writer) (string, error) {
+ if ts.scrapeFunc != nil {
+ return "", ts.scrapeFunc(ctx, w)
+ }
+ return "", ts.scrapeErr
+}
diff --git a/pkg/promotel/internal/prometheus/scrape/promotel_test.go b/pkg/promotel/internal/prometheus/scrape/promotel_test.go
new file mode 100644
index 0000000000..4eb7d15164
--- /dev/null
+++ b/pkg/promotel/internal/prometheus/scrape/promotel_test.go
@@ -0,0 +1,227 @@
+package scrape_test
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/prometheus/model/exemplar"
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/metadata"
+ "github.com/prometheus/prometheus/storage"
+
+ "github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheus/scrape"
+)
+
+// TestScrapeLoopScrapeAndReport runs a gatherer-backed scrape loop against a
+// live registry and verifies that gathered application metrics are appended.
+func TestScrapeLoopScrapeAndReport(t *testing.T) {
+ appendable := &collectResultAppendable{&testAppender{}}
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ reg := prometheus.NewRegistry()
+
+ sl, err := scrape.NewGathererLoop(ctx, nil, appendable, reg, reg, 10*time.Millisecond)
+ require.NoError(t, err)
+
+ start := time.Now()
+ sl.ScrapeAndReport(time.Time{}, start, nil)
+ // The testAppender behind appendable records every appended sample:
+ // staleness markers or actual data, depending on whether the scrape was declared OK.
+ allSamples := appendable.resultFloats
+ // We expect at least one normal sample plus the reported samples.
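+ // (ScrapeAndReport also appends report series such as "up" and
+ // "scrape_duration_seconds", so the result is non-empty even before any
+ // application metrics are registered.)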
+ require.NotEmpty(t, allSamples, "Expected to see appended samples.") + + // reset the appender + appendable.testAppender = &testAppender{} + // create counter metric + counter := prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "metric_a", + Help: "metric_a help", + }, []string{"label_a"}) + reg.MustRegister(counter) + counter.WithLabelValues("value_a").Add(42) + + mfs, err := reg.Gather() + require.NoError(t, err) + // verify that metric_a is present in Gatherer results + var foundMetric bool + for _, mf := range mfs { + if mf.GetName() == "metric_a" { + // verify metrics value + require.Len(t, mf.GetMetric(), 1) + require.Equal(t, "value_a", mf.GetMetric()[0].GetLabel()[0].GetValue()) + require.Equal(t, 42.0, mf.GetMetric()[0].GetCounter().GetValue()) // nolint + foundMetric = true + break + } + } + require.True(t, foundMetric, "Expected to see the 'metric_a' counter metric.") + + sl.ScrapeAndReport(time.Time{}, start, nil) + // Get all appended samples + allSamples = appendable.resultFloats + // verify that the counter metric 'metric_a' was reported + var found bool + for _, s := range allSamples { + if s.metric.Get("__name__") == "metric_a" && s.metric.Get("label_a") == "value_a" { + found = true + require.Equal(t, 42.0, s.f) // nolint + } + } + require.True(t, found, "Expected to see the 'metric_a' counter metric.") +} + +type floatSample struct { + metric labels.Labels + t int64 + f float64 +} + +type histogramSample struct { + t int64 + h *histogram.Histogram + fh *histogram.FloatHistogram +} + +type collectResultAppendable struct { + *testAppender +} + +func (a *collectResultAppendable) Appender(_ context.Context) storage.Appender { + return a +} + +// testAppender records all samples that were added through the appender. +// It can be used as its zero value or be backed by another appender it writes samples through. 
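+// It mirrors collectResultAppender from the package's helpers_test.go,
+// duplicated here because this file lives in the external scrape_test package.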
+type testAppender struct { + mtx sync.Mutex + + next storage.Appender + resultFloats []floatSample + pendingFloats []floatSample + rolledbackFloats []floatSample + resultHistograms []histogramSample + pendingHistograms []histogramSample + rolledbackHistograms []histogramSample + resultExemplars []exemplar.Exemplar + pendingExemplars []exemplar.Exemplar + resultMetadata []metadata.Metadata + pendingMetadata []metadata.Metadata +} + +func (a *testAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { + a.mtx.Lock() + defer a.mtx.Unlock() + a.pendingFloats = append(a.pendingFloats, floatSample{ + metric: lset, + t: t, + f: v, + }) + + if ref == 0 { + ref = storage.SeriesRef(rand.Uint64()) + } + if a.next == nil { + return ref, nil + } + + ref, err := a.next.Append(ref, lset, t, v) + if err != nil { + return 0, err + } + return ref, err +} + +func (a *testAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { + a.mtx.Lock() + defer a.mtx.Unlock() + a.pendingExemplars = append(a.pendingExemplars, e) + if a.next == nil { + return 0, nil + } + + return a.next.AppendExemplar(ref, l, e) +} + +func (a *testAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + a.mtx.Lock() + defer a.mtx.Unlock() + a.pendingHistograms = append(a.pendingHistograms, histogramSample{h: h, fh: fh, t: t}) + if a.next == nil { + return 0, nil + } + + return a.next.AppendHistogram(ref, l, t, h, fh) +} + +func (a *testAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { + a.mtx.Lock() + defer a.mtx.Unlock() + a.pendingMetadata = append(a.pendingMetadata, m) + if ref == 0 { + ref = storage.SeriesRef(rand.Uint64()) + } + if a.next == nil { + return ref, nil + } + + return a.next.UpdateMetadata(ref, l, m) +} + +func (a *testAppender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) { + return a.Append(ref, l, ct, 0.0) +} + +func (a *testAppender) Commit() error { + a.mtx.Lock() + defer a.mtx.Unlock() + a.resultFloats = append(a.resultFloats, a.pendingFloats...) + a.resultExemplars = append(a.resultExemplars, a.pendingExemplars...) + a.resultHistograms = append(a.resultHistograms, a.pendingHistograms...) + a.resultMetadata = append(a.resultMetadata, a.pendingMetadata...) 
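+ // Everything pending has been committed; clear the pending buffers so a
+ // subsequent scrape starts from a clean slate.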
+ a.pendingFloats = nil + a.pendingExemplars = nil + a.pendingHistograms = nil + a.pendingMetadata = nil + if a.next == nil { + return nil + } + return a.next.Commit() +} + +func (a *testAppender) Rollback() error { + a.mtx.Lock() + defer a.mtx.Unlock() + a.rolledbackFloats = a.pendingFloats + a.rolledbackHistograms = a.pendingHistograms + a.pendingFloats = nil + a.pendingHistograms = nil + if a.next == nil { + return nil + } + return a.next.Rollback() +} + +func (a *testAppender) String() string { + var sb strings.Builder + for _, s := range a.resultFloats { + sb.WriteString(fmt.Sprintf("committed: %s %f %d\n", s.metric, s.f, s.t)) + } + for _, s := range a.pendingFloats { + sb.WriteString(fmt.Sprintf("pending: %s %f %d\n", s.metric, s.f, s.t)) + } + for _, s := range a.rolledbackFloats { + sb.WriteString(fmt.Sprintf("rolledback: %s %f %d\n", s.metric, s.f, s.t)) + } + return sb.String() +} diff --git a/pkg/promotel/internal/prometheus/scrape/scrape.go b/pkg/promotel/internal/prometheus/scrape/scrape.go new file mode 100644 index 0000000000..7957436b2c --- /dev/null +++ b/pkg/promotel/internal/prometheus/scrape/scrape.go @@ -0,0 +1,1608 @@ +package scrape + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "math" + "net/http" + "reflect" + "slices" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/klauspost/compress/gzip" + config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + "github.com/prometheus/common/version" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/model/relabel" + "github.com/prometheus/prometheus/model/textparse" + "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/pool" +) + +// ScrapeTimestampTolerance is the tolerance for scrape appends timestamps +// alignment, to enable better compression at the TSDB level. +// See https://github.com/prometheus/prometheus/issues/7846 +var ScrapeTimestampTolerance = 2 * time.Millisecond + +// AlignScrapeTimestamps enables the tolerance for scrape appends timestamps described above. +var AlignScrapeTimestamps = true + +var errNameLabelMandatory = fmt.Errorf("missing metric name (%s label)", labels.MetricName) + +type labelLimits struct { + labelLimit int + labelNameLengthLimit int + labelValueLengthLimit int +} + +const maxAheadTime = 10 * time.Minute + +// returning an empty label set is interpreted as "drop". 
+type labelsMutator func(labels.Labels) labels.Labels + +func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error { + if limits == nil { + return nil + } + + met := lset.Get(labels.MetricName) + if limits.labelLimit > 0 { + nbLabels := lset.Len() + if nbLabels > limits.labelLimit { + return fmt.Errorf("label_limit exceeded (metric: %.50s, number of labels: %d, limit: %d)", met, nbLabels, limits.labelLimit) + } + } + + if limits.labelNameLengthLimit == 0 && limits.labelValueLengthLimit == 0 { + return nil + } + + return lset.Validate(func(l labels.Label) error { + if limits.labelNameLengthLimit > 0 { + nameLength := len(l.Name) + if nameLength > limits.labelNameLengthLimit { + return fmt.Errorf("label_name_length_limit exceeded (metric: %.50s, label name: %.50s, length: %d, limit: %d)", met, l.Name, nameLength, limits.labelNameLengthLimit) + } + } + + if limits.labelValueLengthLimit > 0 { + valueLength := len(l.Value) + if valueLength > limits.labelValueLengthLimit { + return fmt.Errorf("label_value_length_limit exceeded (metric: %.50s, label name: %.50s, value: %.50q, length: %d, limit: %d)", met, l.Name, l.Value, valueLength, limits.labelValueLengthLimit) + } + } + return nil + }) +} + +func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*relabel.Config) labels.Labels { + lb := labels.NewBuilder(lset) + + if honor { + target.LabelsRange(func(l labels.Label) { + if !lset.Has(l.Name) { + lb.Set(l.Name, l.Value) + } + }) + } else { + var conflictingExposedLabels []labels.Label + target.LabelsRange(func(l labels.Label) { + existingValue := lset.Get(l.Name) + if existingValue != "" { + conflictingExposedLabels = append(conflictingExposedLabels, labels.Label{Name: l.Name, Value: existingValue}) + } + // It is now safe to set the target label. + lb.Set(l.Name, l.Value) + }) + + if len(conflictingExposedLabels) > 0 { + resolveConflictingExposedLabels(lb, conflictingExposedLabels) + } + } + + res := lb.Labels() + + if len(rc) > 0 { + res, _ = relabel.Process(res, rc...) + } + + return res +} + +func resolveConflictingExposedLabels(lb *labels.Builder, conflictingExposedLabels []labels.Label) { + slices.SortStableFunc(conflictingExposedLabels, func(a, b labels.Label) int { + return len(a.Name) - len(b.Name) + }) + + for _, l := range conflictingExposedLabels { + newName := l.Name + for { + newName = model.ExportedLabelPrefix + newName + if lb.Get(newName) == "" { + lb.Set(newName, l.Value) + break + } + } + } +} + +func mutateReportSampleLabels(lset labels.Labels, target *Target) labels.Labels { + lb := labels.NewBuilder(lset) + + target.LabelsRange(func(l labels.Label) { + lb.Set(model.ExportedLabelPrefix+l.Name, lset.Get(l.Name)) + lb.Set(l.Name, l.Value) + }) + + return lb.Labels() +} + +// appender returns an appender for ingested samples from the target. +func appender(app storage.Appender, sampleLimit, bucketLimit int, maxSchema int32) storage.Appender { + app = &timeLimitAppender{ + Appender: app, + maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), + } + + // The sampleLimit is applied after metrics are potentially dropped via relabeling. 
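+	// When every limit is configured, the wrappers below nest around the
+	// timeLimitAppender, so the fully assembled chain looks like (sketch):
+	//
+	//	maxSchemaAppender -> bucketLimitAppender -> limitAppender -> timeLimitAppender -> app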
+	if sampleLimit > 0 {
+		app = &limitAppender{
+			Appender: app,
+			limit:    sampleLimit,
+		}
+	}
+
+	if bucketLimit > 0 {
+		app = &bucketLimitAppender{
+			Appender: app,
+			limit:    bucketLimit,
+		}
+	}
+
+	if maxSchema < histogram.ExponentialSchemaMax {
+		app = &maxSchemaAppender{
+			Appender:  app,
+			maxSchema: maxSchema,
+		}
+	}
+
+	return app
+}
+
+// A scraper retrieves samples and accepts a status report at the end.
+type scraper interface {
+	scrape(ctx context.Context) (*http.Response, error)
+	readResponse(ctx context.Context, resp *http.Response, w io.Writer) (string, error)
+	Report(start time.Time, dur time.Duration, err error)
+	offset(interval time.Duration, offsetSeed uint64) time.Duration
+}
+
+// targetScraper implements the scraper interface for a target.
+type targetScraper struct {
+	*Target
+
+	client  *http.Client
+	req     *http.Request
+	timeout time.Duration
+
+	gzipr *gzip.Reader
+	buf   *bufio.Reader
+
+	bodySizeLimit        int64
+	acceptHeader         string
+	acceptEncodingHeader string
+
+	metrics *scrapeMetrics
+}
+
+func newScraper(ts *targetScraper) scraper {
+	if handler := GetDefaultGathererHandler(); handler != nil {
+		return &gathererScraper{ts, handler}
+	}
+	return ts
+}
+
+var errBodySizeLimit = errors.New("body size limit exceeded")
+
+// acceptHeader transforms the protocol preference from the options into specific
+// header values, as defined by https://www.rfc-editor.org/rfc/rfc9110.html#name-accept.
+// No validation is done here; we expect scrape protocols to be validated already.
+func acceptHeader(sps []config.ScrapeProtocol) string {
+	vals := make([]string, 0, len(sps)+1)
+	weight := len(config.ScrapeProtocolsHeaders) + 1
+	for _, sp := range sps {
+		vals = append(vals, fmt.Sprintf("%s;q=0.%d", config.ScrapeProtocolsHeaders[sp], weight))
+		weight--
+	}
+	// Default match anything.
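+	// For two configured protocols this yields a header like (illustrative;
+	// the actual q-values count down from len(ScrapeProtocolsHeaders)+1):
+	//
+	//	Accept: application/openmetrics-text;version=1.0.0;q=0.5,text/plain;version=0.0.4;q=0.4,*/*;q=0.3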
+ vals = append(vals, fmt.Sprintf("*/*;q=0.%d", weight)) + return strings.Join(vals, ",") +} + +var UserAgent = "Prometheus/" + version.Version + +func (s *targetScraper) scrapeRequest() (*http.Request, error) { + if s.req == nil { + req, err := http.NewRequest(http.MethodGet, s.URL().String(), nil) + if err != nil { + return nil, err + } + req.Header.Add("Accept", s.acceptHeader) + req.Header.Add("Accept-Encoding", s.acceptEncodingHeader) + req.Header.Set("User-Agent", UserAgent) + req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", strconv.FormatFloat(s.timeout.Seconds(), 'f', -1, 64)) + + s.req = req + } + return s.req, nil +} + +func (s *targetScraper) scrape(ctx context.Context) (*http.Response, error) { + req, err := s.scrapeRequest() + if err != nil { + return nil, err + } + return s.client.Do(req.WithContext(ctx)) +} + +func (s *targetScraper) readResponse(ctx context.Context, resp *http.Response, w io.Writer) (string, error) { + defer func() { + _, _ = io.Copy(io.Discard, resp.Body) + resp.Body.Close() + }() + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("server returned HTTP status %s", resp.Status) + } + + if s.bodySizeLimit <= 0 { + s.bodySizeLimit = math.MaxInt64 + } + if resp.Header.Get("Content-Encoding") != "gzip" { + n, err := io.Copy(w, io.LimitReader(resp.Body, s.bodySizeLimit)) + if err != nil { + return "", err + } + if n >= s.bodySizeLimit { + s.metrics.targetScrapeExceededBodySizeLimit.Inc() + return "", errBodySizeLimit + } + return resp.Header.Get("Content-Type"), nil + } + + if s.gzipr == nil { + s.buf = bufio.NewReader(resp.Body) + var err error + s.gzipr, err = gzip.NewReader(s.buf) + if err != nil { + return "", err + } + } else { + s.buf.Reset(resp.Body) + if err := s.gzipr.Reset(s.buf); err != nil { + return "", err + } + } + + n, err := io.Copy(w, io.LimitReader(s.gzipr, s.bodySizeLimit)) + s.gzipr.Close() + if err != nil { + return "", err + } + if n >= s.bodySizeLimit { + s.metrics.targetScrapeExceededBodySizeLimit.Inc() + return "", errBodySizeLimit + } + return resp.Header.Get("Content-Type"), nil +} + +type cacheEntry struct { + ref storage.SeriesRef + lastIter uint64 + hash uint64 + lset labels.Labels +} + +type scrapeLoop struct { + scraper scraper + l log.Logger + cache *scrapeCache + lastScrapeSize int + buffers *pool.Pool + offsetSeed uint64 + honorTimestamps bool + trackTimestampsStaleness bool + enableCompression bool + forcedErr error + forcedErrMtx sync.Mutex + sampleLimit int + bucketLimit int + maxSchema int32 + labelLimits *labelLimits + interval time.Duration + timeout time.Duration + scrapeClassicHistograms bool + + // Feature flagged options. + enableNativeHistogramIngestion bool + enableCTZeroIngestion bool + + appender func(ctx context.Context) storage.Appender + symbolTable *labels.SymbolTable + sampleMutator labelsMutator + reportSampleMutator labelsMutator + + parentCtx context.Context // nolint + appenderCtx context.Context // nolint + ctx context.Context // nolint + cancel func() + stopped chan struct{} + + disabledEndOfRunStalenessMarkers bool + + reportExtraMetrics bool + appendMetadataToWAL bool + + metrics *scrapeMetrics + + skipOffsetting bool // For testability. + + newParserFunc func() (textparse.Parser, error) +} + +// scrapeCache tracks mappings of exposed metric strings to label sets and +// storage references. Additionally, it tracks staleness of series between +// scrapes. +type scrapeCache struct { + iter uint64 // Current scrape iteration. 
+
+	// How many series and metadata entries there were at the last success.
+	successfulCount int
+
+	// Parsed string to an entry with information about the actual label set
+	// and its storage reference.
+	series map[string]*cacheEntry
+
+	// Cache of dropped metric strings and their iteration. The iteration must
+	// be a pointer so we can update it.
+	droppedSeries map[string]*uint64
+
+	// seriesCur and seriesPrev store the labels of series that were seen
+	// in the current and previous scrape.
+	// We hold two maps and swap them out to save allocations.
+	seriesCur  map[uint64]labels.Labels
+	seriesPrev map[uint64]labels.Labels
+
+	metaMtx  sync.Mutex
+	metadata map[string]*metaEntry
+
+	metrics *scrapeMetrics
+}
+
+// metaEntry holds meta information about a metric.
+type metaEntry struct {
+	metadata.Metadata
+
+	lastIter       uint64 // Last scrape iteration the entry was observed at.
+	lastIterChange uint64 // Last scrape iteration the entry was changed at.
+}
+
+func (m *metaEntry) size() int {
+	// The lastIter attributes are part of the struct but are not metadata,
+	// so they are not counted.
+	return len(m.Help) + len(m.Unit) + len(m.Type)
+}
+
+func newScrapeCache(metrics *scrapeMetrics) *scrapeCache {
+	return &scrapeCache{
+		series:        map[string]*cacheEntry{},
+		droppedSeries: map[string]*uint64{},
+		seriesCur:     map[uint64]labels.Labels{},
+		seriesPrev:    map[uint64]labels.Labels{},
+		metadata:      map[string]*metaEntry{},
+		metrics:       metrics,
+	}
+}
+
+func (c *scrapeCache) iterDone(flushCache bool) {
+	c.metaMtx.Lock()
+	count := len(c.series) + len(c.droppedSeries) + len(c.metadata)
+	c.metaMtx.Unlock()
+
+	switch {
+	case flushCache:
+		c.successfulCount = count
+	case count > c.successfulCount*2+1000:
+		// If a target had varying labels in scrapes that ultimately failed,
+		// the caches would grow indefinitely. Force a flush when this happens.
+		// We use the heuristic that this is a doubling of the cache size
+		// since the last scrape, and allow an additional 1000 in case
+		// initial scrapes all fail.
+		flushCache = true
+		c.metrics.targetScrapeCacheFlushForced.Inc()
+	}
+
+	if flushCache {
+		// All caches may grow over time through series churn
+		// or multiple string representations of the same metric. Clean up entries
+		// that haven't appeared in the last scrape.
+		for s, e := range c.series {
+			if c.iter != e.lastIter {
+				delete(c.series, s)
+			}
+		}
+		for s, iter := range c.droppedSeries {
+			if c.iter != *iter {
+				delete(c.droppedSeries, s)
+			}
+		}
+		c.metaMtx.Lock()
+		for m, e := range c.metadata {
+			// Keep metadata around for 10 scrapes after its metric disappeared.
+			if c.iter-e.lastIter > 10 {
+				delete(c.metadata, m)
+			}
+		}
+		c.metaMtx.Unlock()
+
+		c.iter++
+	}
+
+	// Swap current and previous series.
+	c.seriesPrev, c.seriesCur = c.seriesCur, c.seriesPrev
+
+	// We have to delete every single key in the map.
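+	// (Deleting the keys in place, rather than allocating a fresh map, lets Go
+	// reuse the existing buckets on the next scrape.)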
+	for k := range c.seriesCur {
+		delete(c.seriesCur, k)
+	}
+}
+
+func (c *scrapeCache) get(met []byte) (*cacheEntry, bool, bool) {
+	e, ok := c.series[string(met)]
+	if !ok {
+		return nil, false, false
+	}
+	alreadyScraped := e.lastIter == c.iter
+	e.lastIter = c.iter
+	return e, true, alreadyScraped
+}
+
+func (c *scrapeCache) addRef(met []byte, ref storage.SeriesRef, lset labels.Labels, hash uint64) {
+	if ref == 0 {
+		return
+	}
+	c.series[string(met)] = &cacheEntry{ref: ref, lastIter: c.iter, lset: lset, hash: hash}
+}
+
+func (c *scrapeCache) addDropped(met []byte) {
+	iter := c.iter
+	c.droppedSeries[string(met)] = &iter
+}
+
+func (c *scrapeCache) getDropped(met []byte) bool {
+	iterp, ok := c.droppedSeries[string(met)]
+	if ok {
+		*iterp = c.iter
+	}
+	return ok
+}
+
+func (c *scrapeCache) trackStaleness(hash uint64, lset labels.Labels) {
+	c.seriesCur[hash] = lset
+}
+
+func (c *scrapeCache) forEachStale(f func(labels.Labels) bool) {
+	for h, lset := range c.seriesPrev {
+		if _, ok := c.seriesCur[h]; !ok {
+			if !f(lset) {
+				break
+			}
+		}
+	}
+}
+
+func (c *scrapeCache) setType(metric []byte, t model.MetricType) {
+	c.metaMtx.Lock()
+
+	e, ok := c.metadata[string(metric)]
+	if !ok {
+		e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}}
+		c.metadata[string(metric)] = e
+	}
+	if e.Type != t {
+		e.Type = t
+		e.lastIterChange = c.iter
+	}
+	e.lastIter = c.iter
+
+	c.metaMtx.Unlock()
+}
+
+func (c *scrapeCache) setHelp(metric, help []byte) {
+	c.metaMtx.Lock()
+
+	e, ok := c.metadata[string(metric)]
+	if !ok {
+		e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}}
+		c.metadata[string(metric)] = e
+	}
+	if e.Help != string(help) {
+		e.Help = string(help)
+		e.lastIterChange = c.iter
+	}
+	e.lastIter = c.iter
+
+	c.metaMtx.Unlock()
+}
+
+func (c *scrapeCache) setUnit(metric, unit []byte) {
+	c.metaMtx.Lock()
+
+	e, ok := c.metadata[string(metric)]
+	if !ok {
+		e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}}
+		c.metadata[string(metric)] = e
+	}
+	if e.Unit != string(unit) {
+		e.Unit = string(unit)
+		e.lastIterChange = c.iter
+	}
+	e.lastIter = c.iter
+
+	c.metaMtx.Unlock()
+}
+
+func (c *scrapeCache) GetMetadata(metric string) (MetricMetadata, bool) {
+	c.metaMtx.Lock()
+	defer c.metaMtx.Unlock()
+
+	m, ok := c.metadata[metric]
+	if !ok {
+		return MetricMetadata{}, false
+	}
+	return MetricMetadata{
+		Metric: metric,
+		Type:   m.Type,
+		Help:   m.Help,
+		Unit:   m.Unit,
+	}, true
+}
+
+func (c *scrapeCache) ListMetadata() []MetricMetadata {
+	c.metaMtx.Lock()
+	defer c.metaMtx.Unlock()
+
+	res := make([]MetricMetadata, 0, len(c.metadata))
+
+	for m, e := range c.metadata {
+		res = append(res, MetricMetadata{
+			Metric: m,
+			Type:   e.Type,
+			Help:   e.Help,
+			Unit:   e.Unit,
+		})
+	}
+	return res
+}
+
+// SizeMetadata returns the total byte size of the entries in the metadata cache.
+func (c *scrapeCache) SizeMetadata() (s int) {
+	c.metaMtx.Lock()
+	defer c.metaMtx.Unlock()
+	for _, e := range c.metadata {
+		s += e.size()
+	}
+
+	return s
+}
+
+// LengthMetadata returns the number of metadata entries in the cache.
+func (c *scrapeCache) LengthMetadata() int { + c.metaMtx.Lock() + defer c.metaMtx.Unlock() + + return len(c.metadata) +} + +func newScrapeLoop(ctx context.Context, + sc scraper, + l log.Logger, + buffers *pool.Pool, + sampleMutator labelsMutator, + reportSampleMutator labelsMutator, + appender func(ctx context.Context) storage.Appender, + cache *scrapeCache, + symbolTable *labels.SymbolTable, + offsetSeed uint64, + honorTimestamps bool, + trackTimestampsStaleness bool, + enableCompression bool, + sampleLimit int, + bucketLimit int, + maxSchema int32, + labelLimits *labelLimits, + interval time.Duration, + timeout time.Duration, + scrapeClassicHistograms bool, + enableNativeHistogramIngestion bool, + enableCTZeroIngestion bool, + reportExtraMetrics bool, + appendMetadataToWAL bool, + target *Target, + passMetadataInContext bool, + metrics *scrapeMetrics, + skipOffsetting bool, +) *scrapeLoop { + if l == nil { + l = log.NewNopLogger() + } + if buffers == nil { + buffers = pool.New(1e3, 1e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }) + } + if cache == nil { + cache = newScrapeCache(metrics) + } + + appenderCtx := ctx + + if passMetadataInContext { + // Store the cache and target in the context. This is then used by downstream OTel Collector + // to lookup the metadata required to process the samples. Not used by Prometheus itself. + // TODO(gouthamve) We're using a dedicated context because using the parentCtx caused a memory + // leak. We should ideally fix the main leak. See: https://github.com/prometheus/prometheus/pull/10590 + appenderCtx = ContextWithMetricMetadataStore(appenderCtx, cache) + appenderCtx = ContextWithTarget(appenderCtx, target) + } + + sl := &scrapeLoop{ + scraper: sc, + buffers: buffers, + cache: cache, + appender: appender, + symbolTable: symbolTable, + sampleMutator: sampleMutator, + reportSampleMutator: reportSampleMutator, + stopped: make(chan struct{}), + offsetSeed: offsetSeed, + l: l, + parentCtx: ctx, + appenderCtx: appenderCtx, + honorTimestamps: honorTimestamps, + trackTimestampsStaleness: trackTimestampsStaleness, + enableCompression: enableCompression, + sampleLimit: sampleLimit, + bucketLimit: bucketLimit, + maxSchema: maxSchema, + labelLimits: labelLimits, + interval: interval, + timeout: timeout, + scrapeClassicHistograms: scrapeClassicHistograms, + enableNativeHistogramIngestion: enableNativeHistogramIngestion, + enableCTZeroIngestion: enableCTZeroIngestion, + reportExtraMetrics: reportExtraMetrics, + appendMetadataToWAL: appendMetadataToWAL, + metrics: metrics, + skipOffsetting: skipOffsetting, + } + sl.ctx, sl.cancel = context.WithCancel(ctx) + + return sl +} + +func (sl *scrapeLoop) run(errc chan<- error) { + if !sl.skipOffsetting { + select { + case <-time.After(sl.scraper.offset(sl.interval, sl.offsetSeed)): + // Continue after a scraping offset. + case <-sl.ctx.Done(): + close(sl.stopped) + return + } + } + + var last time.Time + + alignedScrapeTime := time.Now().Round(0) + ticker := time.NewTicker(sl.interval) + defer ticker.Stop() + +mainLoop: + for { + select { + case <-sl.parentCtx.Done(): + close(sl.stopped) + return + case <-sl.ctx.Done(): + break mainLoop + default: + } + + // Temporary workaround for a jitter in go timers that causes disk space + // increase in TSDB. + // See https://github.com/prometheus/prometheus/issues/7846 + // Calling Round ensures the time used is the wall clock, as otherwise .Sub + // and .Add on time.Time behave differently (see time package docs). 
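+	// A minimal illustration of what Round(0) does here:
+	//
+	//	t := time.Now() // carries a monotonic clock reading
+	//	w := t.Round(0) // wall clock only; arithmetic on w uses wall-clock time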
+ scrapeTime := time.Now().Round(0) + if AlignScrapeTimestamps { + // Tolerance is clamped to maximum 1% of the scrape interval. + tolerance := min(sl.interval/100, ScrapeTimestampTolerance) + // For some reason, a tick might have been skipped, in which case we + // would call alignedScrapeTime.Add(interval) multiple times. + for scrapeTime.Sub(alignedScrapeTime) >= sl.interval { + alignedScrapeTime = alignedScrapeTime.Add(sl.interval) + } + // Align the scrape time if we are in the tolerance boundaries. + if scrapeTime.Sub(alignedScrapeTime) <= tolerance { + scrapeTime = alignedScrapeTime + } + } + + last = sl.scrapeAndReport(last, scrapeTime, errc) + + select { + case <-sl.parentCtx.Done(): + close(sl.stopped) + return + case <-sl.ctx.Done(): + break mainLoop + case <-ticker.C: + } + } + + close(sl.stopped) + + if !sl.disabledEndOfRunStalenessMarkers { + sl.endOfRunStaleness(last, ticker, sl.interval) + } +} + +// scrapeAndReport performs a scrape and then appends the result to the storage +// together with reporting metrics, by using as few appenders as possible. +// In the happy scenario, a single appender is used. +// This function uses sl.appenderCtx instead of sl.ctx on purpose. A scrape should +// only be cancelled on shutdown, not on reloads. +func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- error) time.Time { + start := time.Now() + + // Only record after the first scrape. + if !last.IsZero() { + sl.metrics.targetIntervalLength.WithLabelValues(sl.interval.String()).Observe( + time.Since(last).Seconds(), + ) + } + + var total, added, seriesAdded, bytesRead int + var err, appErr, scrapeErr error + + app := sl.appender(sl.appenderCtx) + defer func() { + if err != nil { + _ = app.Rollback() + return + } + err = app.Commit() + if err != nil { + _ = level.Error(sl.l).Log("msg", "Scrape commit failed", "err", err) + } + }() + + defer func() { + if err = sl.report(app, appendTime, time.Since(start), total, added, seriesAdded, bytesRead, scrapeErr); err != nil { + _ = level.Warn(sl.l).Log("msg", "Appending scrape report failed", "err", err) + } + }() + + if forcedErr := sl.getForcedError(); forcedErr != nil { + scrapeErr = forcedErr + // Add stale markers. + if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil { + _ = app.Rollback() + app = sl.appender(sl.appenderCtx) + _ = level.Warn(sl.l).Log("msg", "Append failed", "err", err) + } + if errc != nil { + errc <- forcedErr + } + + return start + } + + var contentType string + var resp *http.Response + var b []byte + var buf *bytes.Buffer + scrapeCtx, cancel := context.WithTimeout(sl.parentCtx, sl.timeout) + resp, scrapeErr = sl.scraper.scrape(scrapeCtx) + if scrapeErr == nil { + b = sl.buffers.Get(sl.lastScrapeSize).([]byte) + defer sl.buffers.Put(b) + buf = bytes.NewBuffer(b) + contentType, scrapeErr = sl.scraper.readResponse(scrapeCtx, resp, buf) + } + cancel() + + if scrapeErr == nil { + b = buf.Bytes() + // NOTE: There were issues with misbehaving clients in the past + // that occasionally returned empty results. We don't want those + // to falsely reset our buffer size. + if len(b) > 0 { + sl.lastScrapeSize = len(b) + } + bytesRead = len(b) + } else { + _ = level.Debug(sl.l).Log("msg", "Scrape failed", "err", scrapeErr) + if errc != nil { + errc <- scrapeErr + } + if errors.Is(scrapeErr, errBodySizeLimit) { + bytesRead = -1 + } + } + + // A failed scrape is the same as an empty scrape, + // we still call sl.append to trigger stale markers. 
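+	// With an empty body, every series seen in the previous scrape is absent
+	// from this one, so the staleness tracking in sl.append emits a stale
+	// marker for each of them.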
+	total, added, seriesAdded, appErr = sl.append(app, b, contentType, appendTime)
+	if appErr != nil {
+		_ = app.Rollback()
+		app = sl.appender(sl.appenderCtx)
+		_ = level.Debug(sl.l).Log("msg", "Append failed", "err", appErr)
+		// The append failed, probably due to a parse error or sample limit.
+		// Call sl.append again with an empty scrape to trigger stale markers.
+		if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil {
+			_ = app.Rollback()
+			app = sl.appender(sl.appenderCtx)
+			_ = level.Warn(sl.l).Log("msg", "Append failed", "err", err)
+		}
+	}
+
+	if scrapeErr == nil {
+		scrapeErr = appErr
+	}
+
+	return start
+}
+
+func (sl *scrapeLoop) setForcedError(err error) {
+	sl.forcedErrMtx.Lock()
+	defer sl.forcedErrMtx.Unlock()
+	sl.forcedErr = err
+}
+
+func (sl *scrapeLoop) getForcedError() error {
+	sl.forcedErrMtx.Lock()
+	defer sl.forcedErrMtx.Unlock()
+	return sl.forcedErr
+}
+
+func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, interval time.Duration) {
+	// Scraping has stopped. We want to write stale markers but
+	// the target may be recreated, so we wait just over 2 scrape intervals
+	// before creating them.
+	// If the context is canceled, we presume the server is shutting down
+	// and will restart where it was. We do not attempt to write stale markers
+	// in this case.
+
+	if last.IsZero() {
+		// There never was a scrape, so there will be no stale markers.
+		return
+	}
+
+	// Wait for when the next scrape would have been, record its timestamp.
+	var staleTime time.Time
+	select {
+	case <-sl.parentCtx.Done():
+		return
+	case <-ticker.C:
+		staleTime = time.Now()
+	}
+
+	// Wait for when the next scrape would have been; if the target was recreated,
+	// samples should have been ingested by now.
+	select {
+	case <-sl.parentCtx.Done():
+		return
+	case <-ticker.C:
+	}
+
+	// Wait for an extra 10% of the interval, just to be safe.
+	select {
+	case <-sl.parentCtx.Done():
+		return
+	case <-time.After(interval / 10):
+	}
+
+	// Call sl.append again with an empty scrape to trigger stale markers.
+	// If the target has since been recreated and scraped, the
+	// stale markers will be out of order and ignored.
+	// sl.context would have been canceled, hence using sl.appenderCtx.
+	app := sl.appender(sl.appenderCtx)
+	var err error
+	defer func() {
+		if err != nil {
+			_ = app.Rollback()
+			return
+		}
+		err = app.Commit()
+		if err != nil {
+			_ = level.Warn(sl.l).Log("msg", "Stale commit failed", "err", err)
+		}
+	}()
+	if _, _, _, err = sl.append(app, []byte{}, "", staleTime); err != nil {
+		_ = app.Rollback()
+		app = sl.appender(sl.appenderCtx)
+		_ = level.Warn(sl.l).Log("msg", "Stale append failed", "err", err)
+	}
+	if err = sl.reportStale(app, staleTime); err != nil {
+		_ = level.Warn(sl.l).Log("msg", "Stale report failed", "err", err)
+	}
+}
+
+// Stop the scraping. May still write data and stale markers after it has
+// returned. Cancel the context to stop all writes.
+func (sl *scrapeLoop) stop() { + sl.cancel() + <-sl.stopped +} + +func (sl *scrapeLoop) getCache() *scrapeCache { + return sl.cache +} + +type appendErrors struct { + numOutOfOrder int + numDuplicates int + numOutOfBounds int + numExemplarOutOfOrder int +} + +func (sl *scrapeLoop) newParser(b []byte, contentType string) (textparse.Parser, error) { + if sl.newParserFunc != nil { + return sl.newParserFunc() + } + return textparse.New(b, contentType, sl.scrapeClassicHistograms, sl.symbolTable) +} + +func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) { + p, err := sl.newParser(b, contentType) + + if err != nil { + _ = level.Debug(sl.l).Log( + "msg", "Invalid content type on scrape, using prometheus parser as fallback.", + "content_type", contentType, + "err", err, + ) + } + + var ( + defTime = timestamp.FromTime(ts) + appErrs = appendErrors{} + sampleLimitErr error + bucketLimitErr error + lset labels.Labels // escapes to heap so hoisted out of loop + e exemplar.Exemplar // escapes to heap so hoisted out of loop + meta metadata.Metadata + metadataChanged bool + ) + + exemplars := make([]exemplar.Exemplar, 1) + + // updateMetadata updates the current iteration's metadata object and the + // metadataChanged value if we have metadata in the scrape cache AND the + // labelset is for a new series or the metadata for this series has just + // changed. It returns a boolean based on whether the metadata was updated. + updateMetadata := func(lset labels.Labels, isNewSeries bool) bool { + if !sl.appendMetadataToWAL { + return false + } + + sl.cache.metaMtx.Lock() + defer sl.cache.metaMtx.Unlock() + metaEntry, metaOk := sl.cache.metadata[lset.Get(labels.MetricName)] + if metaOk && (isNewSeries || metaEntry.lastIterChange == sl.cache.iter) { + metadataChanged = true + meta.Type = metaEntry.Type + meta.Unit = metaEntry.Unit + meta.Help = metaEntry.Help + return true + } + return false + } + + // Take an appender with limits. + app = appender(app, sl.sampleLimit, sl.bucketLimit, sl.maxSchema) + + defer func() { + if err != nil { + return + } + // Only perform cache cleaning if the scrape was not empty. + // An empty scrape (usually) is used to indicate a failed scrape. + sl.cache.iterDone(len(b) > 0) + }() + +loop: + for { + var ( + et textparse.Entry + sampleAdded, isHistogram bool + met []byte + parsedTimestamp *int64 + val float64 + h *histogram.Histogram + fh *histogram.FloatHistogram + ) + if et, err = p.Next(); err != nil { + if errors.Is(err, io.EOF) { + err = nil + } + break + } + switch et { + case textparse.EntryType: + sl.cache.setType(p.Type()) + continue + case textparse.EntryHelp: + sl.cache.setHelp(p.Help()) + continue + case textparse.EntryUnit: + sl.cache.setUnit(p.Unit()) + continue + case textparse.EntryComment: + continue + case textparse.EntryHistogram: + isHistogram = true + default: + } + total++ + + t := defTime + if isHistogram { + met, parsedTimestamp, h, fh = p.Histogram() + } else { + met, parsedTimestamp, val = p.Series() + } + if !sl.honorTimestamps { + parsedTimestamp = nil + } + if parsedTimestamp != nil { + t = *parsedTimestamp + } + + // Zero metadata out for current iteration until it's resolved. 
+		meta = metadata.Metadata{}
+		metadataChanged = false
+
+		if sl.cache.getDropped(met) {
+			continue
+		}
+		ce, ok, seriesAlreadyScraped := sl.cache.get(met)
+		var (
+			ref  storage.SeriesRef
+			hash uint64
+		)
+
+		if ok {
+			ref = ce.ref
+			lset = ce.lset
+			hash = ce.hash
+
+			// Update metadata only if it changed in the current iteration.
+			updateMetadata(lset, false)
+		} else {
+			p.Metric(&lset)
+			hash = lset.Hash()
+
+			// Hash the label set as it is seen locally at the target. Then add
+			// target labels, apply relabeling, and store the final label set.
+			lset = sl.sampleMutator(lset)
+
+			// The label set may be set to empty to indicate dropping.
+			if lset.IsEmpty() {
+				sl.cache.addDropped(met)
+				continue
+			}
+
+			if !lset.Has(labels.MetricName) {
+				err = errNameLabelMandatory
+				break loop
+			}
+			if !lset.IsValid() {
+				err = fmt.Errorf("invalid metric name or label names: %s", lset.String())
+				break loop
+			}
+
+			// If any label limit is exceeded, the scrape should fail.
+			if err = verifyLabelLimits(lset, sl.labelLimits); err != nil {
+				sl.metrics.targetScrapePoolExceededLabelLimits.Inc()
+				break loop
+			}
+
+			// Append metadata for new series if they were present.
+			updateMetadata(lset, true)
+		}
+
+		if seriesAlreadyScraped && parsedTimestamp == nil {
+			err = storage.ErrDuplicateSampleForTimestamp
+		} else {
+			if ctMs := p.CreatedTimestamp(); sl.enableCTZeroIngestion && ctMs != nil {
+				ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs)
+				if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now.
+					// CT is an experimental feature. For now, we don't need to fail the
+					// scrape on errors updating the created timestamp; log at debug level instead.
+					_ = level.Debug(sl.l).Log("msg", "Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err)
+				}
+			}
+
+			if isHistogram && sl.enableNativeHistogramIngestion {
+				if h != nil {
+					ref, err = app.AppendHistogram(ref, lset, t, h, nil)
+				} else {
+					ref, err = app.AppendHistogram(ref, lset, t, nil, fh)
+				}
+			} else {
+				ref, err = app.Append(ref, lset, t, val)
+			}
+		}
+
+		if err == nil {
+			if (parsedTimestamp == nil || sl.trackTimestampsStaleness) && ce != nil {
+				sl.cache.trackStaleness(ce.hash, ce.lset)
+			}
+		}
+
+		sampleAdded, err = sl.checkAddError(met, err, &sampleLimitErr, &bucketLimitErr, &appErrs)
+		if err != nil {
+			if !errors.Is(err, storage.ErrNotFound) {
+				_ = level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err)
+			}
+			break loop
+		}
+
+		if !ok {
+			if parsedTimestamp == nil || sl.trackTimestampsStaleness {
+				// Bypass staleness logic if there is an explicit timestamp.
+				sl.cache.trackStaleness(hash, lset)
+			}
+			sl.cache.addRef(met, ref, lset, hash)
+			if sampleAdded && sampleLimitErr == nil && bucketLimitErr == nil {
+				seriesAdded++
+			}
+		}
+
+		// Increment added even if there's an error so we correctly report the
+		// number of samples remaining after relabeling.
+		// We still report duplicated samples here since this number should be the exact number
+		// of time series exposed on a scrape after relabeling.
+		added++
+		exemplars = exemplars[:0] // Reset and reuse the exemplar slice.
+		for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) {
+			if !e.HasTs {
+				if isHistogram {
+					// We drop exemplars for native histograms if they don't have a timestamp.
+ // Missing timestamps are deliberately not supported as we want to start + // enforcing timestamps for exemplars as otherwise proper deduplication + // is inefficient and purely based on heuristics: we cannot distinguish + // between repeated exemplars and new instances with the same values. + // This is done silently without logs as it is not an error but out of spec. + // This does not affect classic histograms so that behaviour is unchanged. + e = exemplar.Exemplar{} // Reset for next time round loop. + continue + } + e.Ts = t + } + exemplars = append(exemplars, e) + e = exemplar.Exemplar{} // Reset for next time round loop. + } + // Sort so that checking for duplicates / out of order is more efficient during validation. + slices.SortFunc(exemplars, exemplar.Compare) + outOfOrderExemplars := 0 + for _, e := range exemplars { + _, exemplarErr := app.AppendExemplar(ref, lset, e) + switch { + case exemplarErr == nil: + // Do nothing. + case errors.Is(exemplarErr, storage.ErrOutOfOrderExemplar): + outOfOrderExemplars++ + default: + // Since exemplar storage is still experimental, we don't fail the scrape on ingestion errors. + level.Debug(sl.l).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr) // nolint + } + } + if outOfOrderExemplars > 0 && outOfOrderExemplars == len(exemplars) { + // Only report out of order exemplars if all are out of order, otherwise this was a partial update + // to some existing set of exemplars. + appErrs.numExemplarOutOfOrder += outOfOrderExemplars + level.Debug(sl.l).Log("msg", "Out of order exemplars", "count", outOfOrderExemplars, "latest", fmt.Sprintf("%+v", exemplars[len(exemplars)-1])) // nolint + sl.metrics.targetScrapeExemplarOutOfOrder.Add(float64(outOfOrderExemplars)) + } + + if sl.appendMetadataToWAL && metadataChanged { + if _, merr := app.UpdateMetadata(ref, lset, meta); merr != nil { + // No need to fail the scrape on errors appending metadata. + level.Debug(sl.l).Log("msg", "Error when appending metadata in scrape loop", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", meta), "err", merr) // nolint + } + } + } + if sampleLimitErr != nil { + if err == nil { + err = sampleLimitErr + } + // We only want to increment this once per scrape, so this is Inc'd outside the loop. + sl.metrics.targetScrapeSampleLimit.Inc() + } + if bucketLimitErr != nil { + if err == nil { + err = bucketLimitErr // If sample limit is hit, that error takes precedence. + } + // We only want to increment this once per scrape, so this is Inc'd outside the loop. + sl.metrics.targetScrapeNativeHistogramBucketLimit.Inc() + } + if appErrs.numOutOfOrder > 0 { + level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order samples", "num_dropped", appErrs.numOutOfOrder) // nolint + } + if appErrs.numDuplicates > 0 { + level.Warn(sl.l).Log("msg", "Error on ingesting samples with different value but same timestamp", "num_dropped", appErrs.numDuplicates) // nolint + } + if appErrs.numOutOfBounds > 0 { + level.Warn(sl.l).Log("msg", "Error on ingesting samples that are too old or are too far into the future", "num_dropped", appErrs.numOutOfBounds) // nolint + } + if appErrs.numExemplarOutOfOrder > 0 { + level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", appErrs.numExemplarOutOfOrder) // nolint + } + if err == nil { + sl.cache.forEachStale(func(lset labels.Labels) bool { + // Series no longer exposed, mark it stale. 
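+			// value.StaleNaN is a NaN with a fixed bit pattern; appending it tells
+			// queriers the series ended here, rather than recording a real value.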
+			_, err = app.Append(0, lset, defTime, math.Float64frombits(value.StaleNaN))
+			switch {
+			case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
+				// Do not count these in logging, as this is expected if a target
+				// goes away and comes back again with a new scrape loop.
+				err = nil
+			}
+			return err == nil
+		})
+	}
+	return
+}
+
+// checkAddError classifies the error from appending a sample: it returns whether
+// the sample was added, and an error only when the caller should stop processing
+// the scrape. Sample and bucket limit errors are stashed in the out parameters so
+// that parsing can continue and the total sample count stays accurate.
+func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) {
+	switch {
+	case err == nil:
+		return true, nil
+	case errors.Is(err, storage.ErrNotFound):
+		return false, storage.ErrNotFound
+	case errors.Is(err, storage.ErrOutOfOrderSample):
+		appErrs.numOutOfOrder++
+		level.Debug(sl.l).Log("msg", "Out of order sample", "series", string(met)) // nolint
+		sl.metrics.targetScrapeSampleOutOfOrder.Inc()
+		return false, nil
+	case errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
+		appErrs.numDuplicates++
+		level.Debug(sl.l).Log("msg", "Duplicate sample for timestamp", "series", string(met)) // nolint
+		sl.metrics.targetScrapeSampleDuplicate.Inc()
+		return false, nil
+	case errors.Is(err, storage.ErrOutOfBounds):
+		appErrs.numOutOfBounds++
+		level.Debug(sl.l).Log("msg", "Out of bounds metric", "series", string(met)) // nolint
+		sl.metrics.targetScrapeSampleOutOfBounds.Inc()
+		return false, nil
+	case errors.Is(err, errSampleLimit):
+		// Keep on parsing output if we hit the limit, so we report the correct
+		// total number of samples scraped.
+		*sampleLimitErr = err
+		return false, nil
+	case errors.Is(err, errBucketLimit):
+		// Keep on parsing output if we hit the limit, so we report the correct
+		// total number of samples scraped.
+		*bucketLimitErr = err
+		return false, nil
+	default:
+		return false, err
+	}
+}
+
+// The constants are suffixed with the invalid \xff unicode rune to avoid collisions
+// with scraped metrics in the cache.
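+// For example, the cache key for the health metric is "up\xff"; no scraped
+// metric can produce that key because \xff is not valid UTF-8.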
+var ( + scrapeHealthMetricName = []byte("up" + "\xff") + scrapeDurationMetricName = []byte("scrape_duration_seconds" + "\xff") + scrapeSamplesMetricName = []byte("scrape_samples_scraped" + "\xff") + samplesPostRelabelMetricName = []byte("scrape_samples_post_metric_relabeling" + "\xff") + scrapeSeriesAddedMetricName = []byte("scrape_series_added" + "\xff") + scrapeTimeoutMetricName = []byte("scrape_timeout_seconds" + "\xff") + scrapeSampleLimitMetricName = []byte("scrape_sample_limit" + "\xff") + scrapeBodySizeBytesMetricName = []byte("scrape_body_size_bytes" + "\xff") +) + +func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration time.Duration, scraped, added, seriesAdded, bytes int, scrapeErr error) (err error) { + sl.scraper.Report(start, duration, scrapeErr) + + ts := timestamp.FromTime(start) + + var health float64 + if scrapeErr == nil { + health = 1 + } + b := labels.NewBuilderWithSymbolTable(sl.symbolTable) + + if err = sl.addReportSample(app, scrapeHealthMetricName, ts, health, b); err != nil { + return + } + if err = sl.addReportSample(app, scrapeDurationMetricName, ts, duration.Seconds(), b); err != nil { + return + } + if err = sl.addReportSample(app, scrapeSamplesMetricName, ts, float64(scraped), b); err != nil { + return + } + if err = sl.addReportSample(app, samplesPostRelabelMetricName, ts, float64(added), b); err != nil { + return + } + if err = sl.addReportSample(app, scrapeSeriesAddedMetricName, ts, float64(seriesAdded), b); err != nil { + return + } + if sl.reportExtraMetrics { + if err = sl.addReportSample(app, scrapeTimeoutMetricName, ts, sl.timeout.Seconds(), b); err != nil { + return + } + if err = sl.addReportSample(app, scrapeSampleLimitMetricName, ts, float64(sl.sampleLimit), b); err != nil { + return + } + if err = sl.addReportSample(app, scrapeBodySizeBytesMetricName, ts, float64(bytes), b); err != nil { + return + } + } + return +} + +func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err error) { + ts := timestamp.FromTime(start) + + stale := math.Float64frombits(value.StaleNaN) + b := labels.NewBuilder(labels.EmptyLabels()) + + if err = sl.addReportSample(app, scrapeHealthMetricName, ts, stale, b); err != nil { + return + } + if err = sl.addReportSample(app, scrapeDurationMetricName, ts, stale, b); err != nil { + return + } + if err = sl.addReportSample(app, scrapeSamplesMetricName, ts, stale, b); err != nil { + return + } + if err = sl.addReportSample(app, samplesPostRelabelMetricName, ts, stale, b); err != nil { + return + } + if err = sl.addReportSample(app, scrapeSeriesAddedMetricName, ts, stale, b); err != nil { + return + } + if sl.reportExtraMetrics { + if err = sl.addReportSample(app, scrapeTimeoutMetricName, ts, stale, b); err != nil { + return + } + if err = sl.addReportSample(app, scrapeSampleLimitMetricName, ts, stale, b); err != nil { + return + } + if err = sl.addReportSample(app, scrapeBodySizeBytesMetricName, ts, stale, b); err != nil { + return + } + } + return +} + +func (sl *scrapeLoop) addReportSample(app storage.Appender, s []byte, t int64, v float64, b *labels.Builder) error { + ce, ok, _ := sl.cache.get(s) + var ref storage.SeriesRef + var lset labels.Labels + if ok { + ref = ce.ref + lset = ce.lset + } else { + // The constants are suffixed with the invalid \xff unicode rune to avoid collisions + // with scraped metrics in the cache. + // We have to drop it when building the actual metric. 
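+		// (s[:len(s)-1] below drops that trailing sentinel byte.)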
+ b.Reset(labels.EmptyLabels()) + b.Set(labels.MetricName, string(s[:len(s)-1])) + lset = sl.reportSampleMutator(b.Labels()) + } + + ref, err := app.Append(ref, lset, t, v) + switch { + case err == nil: + if !ok { + sl.cache.addRef(s, ref, lset, lset.Hash()) + } + return nil + case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp): + // Do not log here, as this is expected if a target goes away and comes back + // again with a new scrape loop. + return nil + default: + return err + } +} + +// zeroConfig returns a new scrape config that only contains configuration items +// that alter metrics. +func zeroConfig(c *config.ScrapeConfig) *config.ScrapeConfig { + z := *c + // We zero out the fields that for sure don't affect scrape. + z.ScrapeInterval = 0 + z.ScrapeTimeout = 0 + z.SampleLimit = 0 + z.HTTPClientConfig = config_util.HTTPClientConfig{} + return &z +} + +// reusableCache compares two scrape config and tells whether the cache is still +// valid. +func reusableCache(r, l *config.ScrapeConfig) bool { + if r == nil || l == nil { + return false + } + return reflect.DeepEqual(zeroConfig(r), zeroConfig(l)) +} + +// CtxKey is a dedicated type for keys of context-embedded values propagated +// with the scrape context. +type ctxKey int + +// Valid CtxKey values. +const ( + ctxKeyMetadata ctxKey = iota + 1 + ctxKeyTarget +) + +func ContextWithMetricMetadataStore(ctx context.Context, s MetricMetadataStore) context.Context { + return context.WithValue(ctx, ctxKeyMetadata, s) +} + +func MetricMetadataStoreFromContext(ctx context.Context) (MetricMetadataStore, bool) { + s, ok := ctx.Value(ctxKeyMetadata).(MetricMetadataStore) + return s, ok +} + +func ContextWithTarget(ctx context.Context, t *Target) context.Context { + return context.WithValue(ctx, ctxKeyTarget, t) +} + +func TargetFromContext(ctx context.Context) (*Target, bool) { + t, ok := ctx.Value(ctxKeyTarget).(*Target) + return t, ok +} + +func pickSchema(bucketFactor float64) int32 { + if bucketFactor <= 1 { + bucketFactor = 1.00271 + } + floor := math.Floor(-math.Log2(math.Log2(bucketFactor))) + switch { + case floor >= float64(histogram.ExponentialSchemaMax): + return histogram.ExponentialSchemaMax + case floor <= float64(histogram.ExponentialSchemaMin): + return histogram.ExponentialSchemaMin + default: + return int32(floor) + } +} + +// Scraper implementation that fetches metrics data from Gatherer http.Handler. 
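+//
+// A minimal wiring sketch (illustrative; myCollector stands in for any
+// registered collector and is not part of this package):
+//
+//	reg := prometheus.NewRegistry()
+//	reg.MustRegister(myCollector)
+//	SetDefaultGatherer(reg) // newScraper now returns a gathererScraper
+//
+// Scrapes then invoke the promhttp handler in-process instead of dialing the
+// target over HTTP.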
+type gathererScraper struct {
+	*targetScraper
+	h http.Handler
+}
+
+type scrapeResult struct {
+	resp *http.Response
+	err  error
+}
+
+func (gs *gathererScraper) scrape(ctx context.Context) (*http.Response, error) {
+	resCh := make(chan scrapeResult, 1)
+	go func() {
+		defer close(resCh)
+		req, err := gs.scrapeRequest()
+		if err != nil {
+			resCh <- scrapeResult{nil, err}
+			return
+		}
+		w := newResponseWriter(req)
+		if gs.h != nil {
+			gs.h.ServeHTTP(w, req)
+		}
+		resCh <- scrapeResult{w.response, nil}
+	}()
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case r := <-resCh:
+		return r.resp, r.err
+	}
+}
+
+type responseWriter struct {
+	http.ResponseWriter
+	response *http.Response
+	// Buffers writes to the response body.
+	w io.Writer
+}
+
+func newResponseWriter(req *http.Request) *responseWriter {
+	buf := new(bytes.Buffer)
+
+	return &responseWriter{
+		w: io.Writer(buf),
+		response: &http.Response{
+			Status:     http.StatusText(http.StatusOK),
+			StatusCode: http.StatusOK,
+			Header:     make(http.Header),
+			Body:       io.NopCloser(buf),
+			Request:    req,
+		},
+	}
+}
+
+func (rw *responseWriter) Header() http.Header {
+	return rw.response.Header
+}
+
+func (rw *responseWriter) Write(data []byte) (int, error) {
+	return rw.w.Write(data)
+}
+
+func (rw *responseWriter) WriteHeader(statusCode int) {
+	rw.response.StatusCode = statusCode
+	rw.response.Status = fmt.Sprintf("%d %s", statusCode, http.StatusText(statusCode))
+}
+
+var (
+	defaultGathererHandler atomic.Pointer[http.Handler]
+
+	defaultGatherer atomic.Pointer[prometheus.Gatherer]
+)
+
+// SetDefaultGathererHandler enables the scraper to read metrics from the given
+// handler directly, without making an HTTP request.
+func SetDefaultGathererHandler(h http.Handler) {
+	defaultGathererHandler.Store(&h)
+}
+
+func SetDefaultGatherer(g prometheus.Gatherer) {
+	defaultGatherer.Store(&g)
+	SetDefaultGathererHandler(promhttp.HandlerFor(g, promhttp.HandlerOpts{}))
+}
+
+func GetDefaultGathererHandler() http.Handler {
+	if h := defaultGathererHandler.Load(); h != nil {
+		return *h
+	}
+	return nil
+}
+
+func GetDefaultGatherer() prometheus.Gatherer {
+	if g := defaultGatherer.Load(); g != nil {
+		return *g
+	}
+	return nil
+}
diff --git a/pkg/promotel/internal/prometheus/scrape/scrape_test.go b/pkg/promotel/internal/prometheus/scrape/scrape_test.go
new file mode 100644
index 0000000000..c4fd31a1bc
--- /dev/null
+++ b/pkg/promotel/internal/prometheus/scrape/scrape_test.go
@@ -0,0 +1,2559 @@
+package scrape
+
+import (
+	"bytes"
+	"compress/gzip"
+	"context"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/go-kit/log"
+	"github.com/gogo/protobuf/proto"
+	"github.com/google/go-cmp/cmp"
+	"github.com/prometheus/client_golang/prometheus"
+	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
+	dto "github.com/prometheus/client_model/go"
+	config_util "github.com/prometheus/common/config"
+	"github.com/prometheus/common/model"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/prometheus/config"
+	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/relabel"
+	"github.com/prometheus/prometheus/model/textparse"
+	"github.com/prometheus/prometheus/model/timestamp"
+	"github.com/prometheus/prometheus/model/value"
+
"github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/util/teststorage" + "github.com/prometheus/prometheus/util/testutil" +) + +func TestMain(m *testing.M) { + testutil.TolerantVerifyLeak(m) +} + +func newTestScrapeMetrics(t testing.TB) *scrapeMetrics { + reg := prometheus.NewRegistry() + metrics, err := newScrapeMetrics(reg) + require.NoError(t, err) + return metrics +} + +func newBasicScrapeLoop(t testing.TB, ctx context.Context, scraper scraper, app func(ctx context.Context) storage.Appender, interval time.Duration) *scrapeLoop { // nolint + return newScrapeLoop(ctx, + scraper, + nil, nil, + nopMutator, + nopMutator, + app, + nil, + labels.NewSymbolTable(), + 0, + true, + false, + true, + 0, 0, histogram.ExponentialSchemaMax, + nil, + interval, + time.Hour, + false, + false, + false, + false, + false, + nil, + false, + newTestScrapeMetrics(t), + false, + ) +} + +func TestScrapeLoopStopBeforeRun(t *testing.T) { + scraper := &scraperShim{} + sl := newBasicScrapeLoop(t, context.Background(), scraper, nil, 1) + + // The scrape pool synchronizes on stopping scrape loops. However, new scrape + // loops are started asynchronously. Thus it's possible, that a loop is stopped + // again before having started properly. + // Stopping not-yet-started loops must block until the run method was called and exited. + // The run method must exit immediately. + + stopDone := make(chan struct{}) + go func() { + sl.stop() + close(stopDone) + }() + + select { + case <-stopDone: + require.FailNow(t, "Stopping terminated before run exited successfully.") + case <-time.After(500 * time.Millisecond): + } + + // Running the scrape loop must exit before calling the scraper even once. + scraper.scrapeFunc = func(context.Context, io.Writer) error { + require.FailNow(t, "Scraper was called for terminated scrape loop.") + return nil + } + + runDone := make(chan struct{}) + go func() { + sl.run(nil) + close(runDone) + }() + + select { + case <-runDone: + case <-time.After(1 * time.Second): + require.FailNow(t, "Running terminated scrape loop did not exit.") + } + + select { + case <-stopDone: + case <-time.After(1 * time.Second): + require.FailNow(t, "Stopping did not terminate after running exited.") + } +} + +func nopMutator(l labels.Labels) labels.Labels { return l } + +func TestScrapeLoopStop(t *testing.T) { + var ( + signal = make(chan struct{}, 1) + appender = &collectResultAppender{} + scraper = &scraperShim{} + app = func(ctx context.Context) storage.Appender { return appender } + ) + + sl := newBasicScrapeLoop(t, context.Background(), scraper, app, 10*time.Millisecond) + + // Terminate loop after 2 scrapes. + numScrapes := 0 + + scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { + numScrapes++ + if numScrapes == 2 { + go sl.stop() + <-sl.ctx.Done() + } + _, _ = w.Write([]byte("metric_a 42\n")) + return ctx.Err() + } + + go func() { + sl.run(nil) + signal <- struct{}{} + }() + + select { + case <-signal: + case <-time.After(5 * time.Second): + require.FailNow(t, "Scrape wasn't stopped.") + } + + // We expected 1 actual sample for each scrape plus 5 for report samples. + // At least 2 scrapes were made, plus the final stale markers. + require.GreaterOrEqual(t, len(appender.resultFloats), 6*3, "Expected at least 3 scrapes with 6 samples each.") + require.Zero(t, len(appender.resultFloats)%6, "There is a scrape with missing samples.") + // All samples in a scrape must have the same timestamp. 
+	var ts int64
+	for i, s := range appender.resultFloats {
+		switch {
+		case i%6 == 0:
+			ts = s.t
+		case s.t != ts:
+			t.Fatalf("Unexpected multiple timestamps within single scrape")
+		}
+	}
+	// All samples from the last scrape must be stale markers.
+	for _, s := range appender.resultFloats[len(appender.resultFloats)-5:] {
+		require.True(t, value.IsStaleNaN(s.f), "Appended last sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(s.f))
+	}
+}
+
+func TestScrapeLoopRun(t *testing.T) {
+	var (
+		signal = make(chan struct{}, 1)
+		errc   = make(chan error)
+
+		scraper       = &scraperShim{}
+		app           = func(ctx context.Context) storage.Appender { return &nopAppender{} }
+		scrapeMetrics = newTestScrapeMetrics(t)
+	)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	sl := newScrapeLoop(ctx,
+		scraper,
+		nil, nil,
+		nopMutator,
+		nopMutator,
+		app,
+		nil,
+		nil,
+		0,
+		true,
+		false,
+		true,
+		0, 0, histogram.ExponentialSchemaMax,
+		nil,
+		time.Second,
+		time.Hour,
+		false,
+		false,
+		false,
+		false,
+		false,
+		nil,
+		false,
+		scrapeMetrics,
+		false,
+	)
+
+	// The loop must terminate during the initial offset if the context
+	// is canceled.
+	scraper.offsetDur = time.Hour
+
+	go func() {
+		sl.run(errc)
+		signal <- struct{}{}
+	}()
+
+	// Wait to make sure we are actually waiting on the offset.
+	time.Sleep(1 * time.Second)
+
+	cancel()
+	select {
+	case <-signal:
+	case <-time.After(5 * time.Second):
+		require.FailNow(t, "Cancellation during initial offset failed.")
+	case err := <-errc:
+		require.FailNow(t, "Unexpected error: %s", err)
+	}
+
+	// The provided timeout must cause cancellation of the context passed down to the
+	// scraper. The scraper has to respect the context.
+	scraper.offsetDur = 0
+
+	block := make(chan struct{})
+	scraper.scrapeFunc = func(ctx context.Context, _ io.Writer) error {
+		select {
+		case <-block:
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+		return nil
+	}
+
+	ctx, cancel = context.WithCancel(context.Background())
+	sl = newBasicScrapeLoop(t, ctx, scraper, app, time.Second)
+	sl.timeout = 100 * time.Millisecond
+
+	go func() {
+		sl.run(errc)
+		signal <- struct{}{}
+	}()
+
+	select {
+	case err := <-errc:
+		require.ErrorIs(t, err, context.DeadlineExceeded)
+	case <-time.After(3 * time.Second):
+		require.FailNow(t, "Expected timeout error but got none.")
+	}
+
+	// We already caught the timeout error and are certainly in the loop.
+	// Let the scrapes return immediately to cause no further timeout errors
+	// and check whether canceling the parent context terminates the loop.
+	close(block)
+	cancel()
+
+	select {
+	case <-signal:
+		// Loop terminated as expected.
+ case err := <-errc: + require.FailNow(t, "Unexpected error: %s", err) + case <-time.After(3 * time.Second): + require.FailNow(t, "Loop did not terminate on context cancellation") + } +} + +func TestScrapeLoopForcedErr(t *testing.T) { + var ( + signal = make(chan struct{}, 1) + errc = make(chan error) + + scraper = &scraperShim{} + app = func(ctx context.Context) storage.Appender { return &nopAppender{} } + ) + + ctx, cancel := context.WithCancel(context.Background()) + sl := newBasicScrapeLoop(t, ctx, scraper, app, time.Second) + + forcedErr := errors.New("forced err") + sl.setForcedError(forcedErr) + + scraper.scrapeFunc = func(context.Context, io.Writer) error { + require.FailNow(t, "Should not be scraped.") + return nil + } + + go func() { + sl.run(errc) + signal <- struct{}{} + }() + + select { + case err := <-errc: + require.ErrorIs(t, err, forcedErr) + case <-time.After(3 * time.Second): + require.FailNow(t, "Expected forced error but got none.") + } + cancel() + + select { + case <-signal: + case <-time.After(5 * time.Second): + require.FailNow(t, "Scrape not stopped.") + } +} + +func TestScrapeLoopMetadata(t *testing.T) { + var ( + signal = make(chan struct{}) + scraper = &scraperShim{} + scrapeMetrics = newTestScrapeMetrics(t) + cache = newScrapeCache(scrapeMetrics) + ) + defer close(signal) + + ctx, cancel := context.WithCancel(context.Background()) + sl := newScrapeLoop(ctx, + scraper, + nil, nil, + nopMutator, + nopMutator, + func(ctx context.Context) storage.Appender { return nopAppender{} }, + cache, + labels.NewSymbolTable(), + 0, + true, + false, + true, + 0, 0, histogram.ExponentialSchemaMax, + nil, + 0, + 0, + false, + false, + false, + false, + false, + nil, + false, + scrapeMetrics, + false, + ) + defer cancel() + + slApp := sl.appender(ctx) + total, _, _, err := sl.append(slApp, []byte(`# TYPE test_metric counter +# HELP test_metric some help text +# UNIT test_metric metric +test_metric 1 +# TYPE test_metric_no_help gauge +# HELP test_metric_no_type other help text +# EOF`), "application/openmetrics-text", time.Now()) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + require.Equal(t, 1, total) + + md, ok := cache.GetMetadata("test_metric") + require.True(t, ok, "expected metadata to be present") + require.Equal(t, model.MetricTypeCounter, md.Type, "unexpected metric type") + require.Equal(t, "some help text", md.Help) + require.Equal(t, "metric", md.Unit) + + md, ok = cache.GetMetadata("test_metric_no_help") + require.True(t, ok, "expected metadata to be present") + require.Equal(t, model.MetricTypeGauge, md.Type, "unexpected metric type") + require.Equal(t, "", md.Help) + require.Equal(t, "", md.Unit) + + md, ok = cache.GetMetadata("test_metric_no_type") + require.True(t, ok, "expected metadata to be present") + require.Equal(t, model.MetricTypeUnknown, md.Type, "unexpected metric type") + require.Equal(t, "other help text", md.Help) + require.Equal(t, "", md.Unit) +} + +func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) { + // Need a full storage for correct Add/AddFast semantics. 
+ s := teststorage.New(t) + t.Cleanup(func() { s.Close() }) + + ctx, cancel := context.WithCancel(context.Background()) + sl := newBasicScrapeLoop(t, ctx, &scraperShim{}, s.Appender, 0) + t.Cleanup(func() { cancel() }) + + return ctx, sl +} + +func TestScrapeLoopSeriesAdded(t *testing.T) { + ctx, sl := simpleTestScrapeLoop(t) + + slApp := sl.appender(ctx) + total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\n"), "", time.Time{}) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + require.Equal(t, 1, total) + require.Equal(t, 1, added) + require.Equal(t, 1, seriesAdded) + + slApp = sl.appender(ctx) + total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1\n"), "", time.Time{}) + require.NoError(t, slApp.Commit()) + require.NoError(t, err) + require.Equal(t, 1, total) + require.Equal(t, 1, added) + require.Equal(t, 0, seriesAdded) +} + +func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) { + s := teststorage.New(t) + defer s.Close() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + target := &Target{ + labels: labels.FromStrings("pod_label_invalid_012", "test"), + } + relabelConfig := []*relabel.Config{{ + Action: relabel.LabelMap, + Regex: relabel.MustNewRegexp("pod_label_invalid_(.+)"), + Separator: ";", + Replacement: "$1", + }} + sl := newBasicScrapeLoop(t, ctx, &scraperShim{}, s.Appender, 0) + sl.sampleMutator = func(l labels.Labels) labels.Labels { + return mutateSampleLabels(l, target, true, relabelConfig) + } + + slApp := sl.appender(ctx) + total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\n"), "", time.Time{}) + require.ErrorContains(t, err, "invalid metric name or label names") + require.NoError(t, slApp.Rollback()) + require.Equal(t, 1, total) + require.Equal(t, 0, added) + require.Equal(t, 0, seriesAdded) +} + +func makeTestMetrics(n int) []byte { + // Construct a metrics string to parse + sb := bytes.Buffer{} + for i := 0; i < n; i++ { + fmt.Fprintf(&sb, "# TYPE metric_a gauge\n") + fmt.Fprintf(&sb, "# HELP metric_a help text\n") + fmt.Fprintf(&sb, "metric_a{foo=\"%d\",bar=\"%d\"} 1\n", i, i*100) + } + fmt.Fprintf(&sb, "# EOF\n") + return sb.Bytes() +} + +func BenchmarkScrapeLoopAppend(b *testing.B) { + ctx, sl := simpleTestScrapeLoop(b) + + slApp := sl.appender(ctx) + metrics := makeTestMetrics(100) + ts := time.Time{} + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + ts = ts.Add(time.Second) + _, _, _, _ = sl.append(slApp, metrics, "", ts) + } +} + +func BenchmarkScrapeLoopAppendOM(b *testing.B) { + ctx, sl := simpleTestScrapeLoop(b) + + slApp := sl.appender(ctx) + metrics := makeTestMetrics(100) + ts := time.Time{} + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + ts = ts.Add(time.Second) + _, _, _, _ = sl.append(slApp, metrics, "application/openmetrics-text", ts) + } +} + +func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) { + appender := &collectResultAppender{} + var ( + signal = make(chan struct{}, 1) + scraper = &scraperShim{} + app = func(ctx context.Context) storage.Appender { return appender } + ) + + ctx, cancel := context.WithCancel(context.Background()) + sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) + // Succeed once, several failures, then stop. 
+ numScrapes := 0 + + scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { + numScrapes++ + + switch numScrapes { + case 1: + _, _ = w.Write([]byte("metric_a 42\n")) + return nil + case 5: + cancel() + } + return errors.New("scrape failed") + } + + go func() { + sl.run(nil) + signal <- struct{}{} + }() + + select { + case <-signal: + case <-time.After(5 * time.Second): + require.FailNow(t, "Scrape wasn't stopped.") + } + + // 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for + // each scrape successful or not. + require.Len(t, appender.resultFloats, 27, "Appended samples not as expected:\n%s", appender) + require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected") // nolint + require.True(t, value.IsStaleNaN(appender.resultFloats[6].f), + "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f)) +} + +func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) { + appender := &collectResultAppender{} + var ( + signal = make(chan struct{}, 1) + scraper = &scraperShim{} + app = func(ctx context.Context) storage.Appender { return appender } + numScrapes = 0 + ) + + ctx, cancel := context.WithCancel(context.Background()) + sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) + + // Succeed once, several failures, then stop. + scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { + numScrapes++ + switch numScrapes { + case 1: + _, _ = w.Write([]byte("metric_a 42\n")) + return nil + case 2: + _, _ = w.Write([]byte("7&-\n")) + return nil + case 3: + cancel() + } + return errors.New("scrape failed") + } + + go func() { + sl.run(nil) + signal <- struct{}{} + }() + + select { + case <-signal: + case <-time.After(5 * time.Second): + require.FailNow(t, "Scrape wasn't stopped.") + } + + // 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for + // each scrape successful or not. + require.Len(t, appender.resultFloats, 17, "Appended samples not as expected:\n%s", appender) + require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected") // nolint + require.True(t, value.IsStaleNaN(appender.resultFloats[6].f), + "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f)) +} + +func TestScrapeLoopCache(t *testing.T) { + s := teststorage.New(t) + defer s.Close() + + appender := &collectResultAppender{} + var ( + signal = make(chan struct{}, 1) + scraper = &scraperShim{} + app = func(ctx context.Context) storage.Appender { appender.next = s.Appender(ctx); return appender } + ) + + ctx, cancel := context.WithCancel(context.Background()) + // Decreasing the scrape interval could make the test fail, as multiple scrapes might be initiated at identical millisecond timestamps. + // See https://github.com/prometheus/prometheus/issues/12727. 
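+	// 100ms keeps the test fast while staying well above millisecond timestamp resolution.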
+	sl := newBasicScrapeLoop(t, ctx, scraper, app, 100*time.Millisecond)
+
+	numScrapes := 0
+
+	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
+		switch numScrapes {
+		case 1, 2:
+			_, ok := sl.cache.series["metric_a"]
+			require.True(t, ok, "metric_a missing from cache after scrape %d", numScrapes)
+			_, ok = sl.cache.series["metric_b"]
+			require.True(t, ok, "metric_b missing from cache after scrape %d", numScrapes)
+		case 3:
+			_, ok := sl.cache.series["metric_a"]
+			require.True(t, ok, "metric_a missing from cache after scrape %d", numScrapes)
+			_, ok = sl.cache.series["metric_b"]
+			require.False(t, ok, "metric_b present in cache after scrape %d", numScrapes)
+		}
+
+		numScrapes++
+		switch numScrapes {
+		case 1:
+			_, _ = w.Write([]byte("metric_a 42\nmetric_b 43\n"))
+			return nil
+		case 3:
+			_, _ = w.Write([]byte("metric_a 44\n"))
+			return nil
+		case 4:
+			cancel()
+		}
+		return errors.New("scrape failed")
+	}
+
+	go func() {
+		sl.run(nil)
+		signal <- struct{}{}
+	}()
+
+	select {
+	case <-signal:
+	case <-time.After(5 * time.Second):
+		require.FailNow(t, "Scrape wasn't stopped.")
+	}
+
+	// 3 successfully scraped samples, 3 stale markers (2 after the failed second scrape,
+	// 1 after the final failure), and 5 report samples for each of the 4 scrapes add up to 26.
+	require.Len(t, appender.resultFloats, 26, "Appended samples not as expected:\n%s", appender)
+}
+
+func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
+	s := teststorage.New(t)
+	defer s.Close()
+
+	sapp := s.Appender(context.Background())
+
+	appender := &collectResultAppender{next: sapp}
+	var (
+		signal  = make(chan struct{}, 1)
+		scraper = &scraperShim{}
+		app     = func(ctx context.Context) storage.Appender { return appender }
+	)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond)
+
+	numScrapes := 0
+
+	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
+		numScrapes++
+		if numScrapes < 5 {
+			s := ""
+			for i := 0; i < 500; i++ {
+				s = fmt.Sprintf("%smetric_%d_%d 42\n", s, i, numScrapes)
+			}
+			_, _ = w.Write([]byte(s + "&"))
+		} else {
+			cancel()
+		}
+		return nil
+	}
+
+	go func() {
+		sl.run(nil)
+		signal <- struct{}{}
+	}()
+
+	select {
+	case <-signal:
+	case <-time.After(5 * time.Second):
+		require.FailNow(t, "Scrape wasn't stopped.")
+	}
+
+	require.LessOrEqual(t, len(sl.cache.series), 2000, "More than 2000 series cached.")
+}
+
+func TestScrapeLoopAppend(t *testing.T) {
+	tests := []struct {
+		title           string
+		honorLabels     bool
+		scrapeLabels    string
+		discoveryLabels []string
+		expLset         labels.Labels
+		expValue        float64
+	}{
+		{
+			// When "honor_labels" is not set,
+			// a label name collision is handled by adding a prefix.
+			title:           "Label name collision",
+			honorLabels:     false,
+			scrapeLabels:    `metric{n="1"} 0`,
+			discoveryLabels: []string{"n", "2"},
+			expLset:         labels.FromStrings("__name__", "metric", "exported_n", "1", "n", "2"),
+			expValue:        0,
+		}, {
+			// When "honor_labels" is not set,
+			// exported labels coming from discovery don't get overwritten.
+			title:           "Label name collision",
+			honorLabels:     false,
+			scrapeLabels:    `metric 0`,
+			discoveryLabels: []string{"n", "2", "exported_n", "2"},
+			expLset:         labels.FromStrings("__name__", "metric", "n", "2", "exported_n", "2"),
+			expValue:        0,
+		}, {
+			// Labels with no value need to be removed as these should not be ingested.
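+			// (In the exposition formats an empty label value is equivalent to the label being absent.)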
+ title: "Delete Empty labels", + honorLabels: false, + scrapeLabels: `metric{n=""} 0`, + discoveryLabels: nil, + expLset: labels.FromStrings("__name__", "metric"), + expValue: 0, + }, { + // Honor Labels should ignore labels with the same name. + title: "Honor Labels", + honorLabels: true, + scrapeLabels: `metric{n1="1", n2="2"} 0`, + discoveryLabels: []string{"n1", "0"}, + expLset: labels.FromStrings("__name__", "metric", "n1", "1", "n2", "2"), + expValue: 0, + }, { + title: "Stale - NaN", + honorLabels: false, + scrapeLabels: `metric NaN`, + discoveryLabels: nil, + expLset: labels.FromStrings("__name__", "metric"), + expValue: math.Float64frombits(value.NormalNaN), + }, + } + + for _, test := range tests { + app := &collectResultAppender{} + + discoveryLabels := &Target{ + labels: labels.FromStrings(test.discoveryLabels...), + } + + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl.sampleMutator = func(l labels.Labels) labels.Labels { + return mutateSampleLabels(l, discoveryLabels, test.honorLabels, nil) + } + sl.reportSampleMutator = func(l labels.Labels) labels.Labels { + return mutateReportSampleLabels(l, discoveryLabels) + } + + now := time.Now() + + slApp := sl.appender(context.Background()) + _, _, _, err := sl.append(slApp, []byte(test.scrapeLabels), "", now) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + + expected := []floatSample{ + { + metric: test.expLset, + t: timestamp.FromTime(now), + f: test.expValue, + }, + } + + t.Logf("Test:%s", test.title) + requireEqual(t, expected, app.resultFloats) + } +} + +func requireEqual(t *testing.T, expected, actual interface{}, msgAndArgs ...interface{}) { + testutil.RequireEqualWithOptions(t, expected, actual, + []cmp.Option{cmp.Comparer(equalFloatSamples), cmp.AllowUnexported(histogramSample{})}, + msgAndArgs...) 
+}
+
+func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) {
+	testcases := map[string]struct {
+		targetLabels  []string
+		exposedLabels string
+		expected      []string
+	}{
+		"One target label collides with existing label": {
+			targetLabels:  []string{"foo", "2"},
+			exposedLabels: `metric{foo="1"} 0`,
+			expected:      []string{"__name__", "metric", "exported_foo", "1", "foo", "2"},
+		},
+
+		"One target label collides with existing label, plus target label already with prefix 'exported'": {
+			targetLabels:  []string{"foo", "2", "exported_foo", "3"},
+			exposedLabels: `metric{foo="1"} 0`,
+			expected:      []string{"__name__", "metric", "exported_exported_foo", "1", "exported_foo", "3", "foo", "2"},
+		},
+		"One target label collides with existing label, plus existing label already with prefix 'exported'": {
+			targetLabels:  []string{"foo", "3"},
+			exposedLabels: `metric{foo="1", exported_foo="2"} 0`,
+			expected:      []string{"__name__", "metric", "exported_exported_foo", "1", "exported_foo", "2", "foo", "3"},
+		},
+		"One target label collides with existing label, both already with prefix 'exported'": {
+			targetLabels:  []string{"exported_foo", "2"},
+			exposedLabels: `metric{exported_foo="1"} 0`,
+			expected:      []string{"__name__", "metric", "exported_exported_foo", "1", "exported_foo", "2"},
+		},
+		"Two target labels collide with existing labels, both with and without prefix 'exported'": {
+			targetLabels:  []string{"foo", "3", "exported_foo", "4"},
+			exposedLabels: `metric{foo="1", exported_foo="2"} 0`,
+			expected: []string{
+				"__name__", "metric", "exported_exported_foo", "1", "exported_exported_exported_foo",
+				"2", "exported_foo", "4", "foo", "3",
+			},
+		},
+		"Extreme example": {
+			targetLabels:  []string{"foo", "0", "exported_exported_foo", "1", "exported_exported_exported_foo", "2"},
+			exposedLabels: `metric{foo="3", exported_foo="4", exported_exported_exported_foo="5"} 0`,
+			expected: []string{
+				"__name__", "metric",
+				"exported_exported_exported_exported_exported_foo", "5",
+				"exported_exported_exported_exported_foo", "3",
+				"exported_exported_exported_foo", "2",
+				"exported_exported_foo", "1",
+				"exported_foo", "4",
+				"foo", "0",
+			},
+		},
+	}
+
+	for name, tc := range testcases {
+		t.Run(name, func(t *testing.T) {
+			app := &collectResultAppender{}
+			sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0)
+			sl.sampleMutator = func(l labels.Labels) labels.Labels {
+				return mutateSampleLabels(l, &Target{labels: labels.FromStrings(tc.targetLabels...)}, false, nil)
+			}
+			slApp := sl.appender(context.Background())
+			_, _, _, err := sl.append(slApp, []byte(tc.exposedLabels), "", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC))
+			require.NoError(t, err)
+
+			require.NoError(t, slApp.Commit())
+
+			requireEqual(t, []floatSample{
+				{
+					metric: labels.FromStrings(tc.expected...),
+					t:      timestamp.FromTime(time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC)),
+					f:      0,
+				},
+			}, app.resultFloats)
+		})
+	}
+}
+
+func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
+	// collectResultAppender's AddFast always returns ErrNotFound if we don't give it a next.
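+	// The append must still succeed: the loop falls back to a fresh Append with
+	// the full label set when the cached ref is rejected.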
+ app := &collectResultAppender{} + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + + fakeRef := storage.SeriesRef(1) + expValue := float64(1) + metric := []byte(`metric{n="1"} 1`) + p, warning := textparse.New(metric, "", false, labels.NewSymbolTable()) + require.NoError(t, warning) + + var lset labels.Labels + _, _ = p.Next() + p.Metric(&lset) + hash := lset.Hash() + + // Create a fake entry in the cache + sl.cache.addRef(metric, fakeRef, lset, hash) + now := time.Now() + + slApp := sl.appender(context.Background()) + _, _, _, err := sl.append(slApp, metric, "", now) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + + expected := []floatSample{ + { + metric: lset, + t: timestamp.FromTime(now), + f: expValue, + }, + } + + require.Equal(t, expected, app.resultFloats) +} + +func TestScrapeLoopAppendSampleLimit(t *testing.T) { + resApp := &collectResultAppender{} + app := &limitAppender{Appender: resApp, limit: 1} + + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl.sampleMutator = func(l labels.Labels) labels.Labels { + if l.Has("deleteme") { + return labels.EmptyLabels() + } + return l + } + sl.sampleLimit = app.limit + + // Get the value of the Counter before performing the append. + beforeMetric := dto.Metric{} + err := sl.metrics.targetScrapeSampleLimit.Write(&beforeMetric) + require.NoError(t, err) + + beforeMetricValue := beforeMetric.GetCounter().GetValue() + + now := time.Now() + slApp := sl.appender(context.Background()) + total, added, seriesAdded, err := sl.append(app, []byte("metric_a 1\nmetric_b 1\nmetric_c 1\n"), "", now) + require.ErrorIs(t, err, errSampleLimit) + require.NoError(t, slApp.Rollback()) + require.Equal(t, 3, total) + require.Equal(t, 3, added) + require.Equal(t, 1, seriesAdded) + + // Check that the Counter has been incremented a single time for the scrape, + // not multiple times for each sample. + metric := dto.Metric{} + err = sl.metrics.targetScrapeSampleLimit.Write(&metric) + require.NoError(t, err) + + value := metric.GetCounter().GetValue() + change := value - beforeMetricValue + require.Equal(t, 1.0, change, "Unexpected change of sample limit metric: %f", change) // nolint + + // And verify that we got the samples that fit under the limit. 
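+	// Only metric_a made it past the limit of 1; the rolled-back appender still recorded it.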
+ want := []floatSample{ + { + metric: labels.FromStrings(model.MetricNameLabel, "metric_a"), + t: timestamp.FromTime(now), + f: 1, + }, + } + requireEqual(t, want, resApp.rolledbackFloats, "Appended samples not as expected:\n%s", appender) + + now = time.Now() + slApp = sl.appender(context.Background()) + total, added, seriesAdded, err = sl.append(slApp, []byte("metric_a 1\nmetric_b 1\nmetric_c{deleteme=\"yes\"} 1\nmetric_d 1\nmetric_e 1\nmetric_f 1\nmetric_g 1\nmetric_h{deleteme=\"yes\"} 1\nmetric_i{deleteme=\"yes\"} 1\n"), "", now) + require.ErrorIs(t, err, errSampleLimit) + require.NoError(t, slApp.Rollback()) + require.Equal(t, 9, total) + require.Equal(t, 6, added) + require.Equal(t, 0, seriesAdded) +} + +func TestScrapeLoop_HistogramBucketLimit(t *testing.T) { + resApp := &collectResultAppender{} + app := &bucketLimitAppender{Appender: resApp, limit: 2} + + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl.enableNativeHistogramIngestion = true + sl.sampleMutator = func(l labels.Labels) labels.Labels { + if l.Has("deleteme") { + return labels.EmptyLabels() + } + return l + } + sl.sampleLimit = app.limit + + metric := dto.Metric{} + err := sl.metrics.targetScrapeNativeHistogramBucketLimit.Write(&metric) + require.NoError(t, err) + beforeMetricValue := metric.GetCounter().GetValue() + + nativeHistogram := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "testing", + Name: "example_native_histogram", + Help: "This is used for testing", + ConstLabels: map[string]string{"some": "value"}, + NativeHistogramBucketFactor: 1.1, // 10% increase from bucket to bucket + NativeHistogramMaxBucketNumber: 100, // intentionally higher than the limit we'll use in the scraper + }, + []string{"size"}, + ) + registry := prometheus.NewRegistry() + _ = registry.Register(nativeHistogram) + nativeHistogram.WithLabelValues("S").Observe(1.0) + nativeHistogram.WithLabelValues("M").Observe(1.0) + nativeHistogram.WithLabelValues("L").Observe(1.0) + nativeHistogram.WithLabelValues("M").Observe(10.0) + nativeHistogram.WithLabelValues("L").Observe(10.0) // in different bucket since > 1*1.1 + + gathered, err := registry.Gather() + require.NoError(t, err) + require.NotEmpty(t, gathered) + + histogramMetricFamily := gathered[0] + msg, err := MetricFamilyToProtobuf(histogramMetricFamily) + require.NoError(t, err) + + now := time.Now() + total, added, seriesAdded, err := sl.append(app, msg, "application/vnd.google.protobuf", now) + require.NoError(t, err) + require.Equal(t, 3, total) + require.Equal(t, 3, added) + require.Equal(t, 3, seriesAdded) + + err = sl.metrics.targetScrapeNativeHistogramBucketLimit.Write(&metric) + require.NoError(t, err) + metricValue := metric.GetCounter().GetValue() + require.Equal(t, beforeMetricValue, metricValue) // nolint + beforeMetricValue = metricValue + + nativeHistogram.WithLabelValues("L").Observe(100.0) // in different bucket since > 10*1.1 + + gathered, err = registry.Gather() + require.NoError(t, err) + require.NotEmpty(t, gathered) + + histogramMetricFamily = gathered[0] + msg, err = MetricFamilyToProtobuf(histogramMetricFamily) + require.NoError(t, err) + + now = time.Now() + total, added, seriesAdded, err = sl.append(app, msg, "application/vnd.google.protobuf", now) + require.NoError(t, err) + require.Equal(t, 3, total) + require.Equal(t, 3, added) + require.Equal(t, 3, seriesAdded) + + err = sl.metrics.targetScrapeNativeHistogramBucketLimit.Write(&metric) + require.NoError(t, err) + 
metricValue = metric.GetCounter().GetValue() + require.Equal(t, beforeMetricValue, metricValue) // nolint + beforeMetricValue = metricValue + + nativeHistogram.WithLabelValues("L").Observe(100000.0) // in different bucket since > 10*1.1 + + gathered, err = registry.Gather() + require.NoError(t, err) + require.NotEmpty(t, gathered) + + histogramMetricFamily = gathered[0] + msg, err = MetricFamilyToProtobuf(histogramMetricFamily) + require.NoError(t, err) + + now = time.Now() + total, added, seriesAdded, err = sl.append(app, msg, "application/vnd.google.protobuf", now) + if !errors.Is(err, errBucketLimit) { + t.Fatalf("Did not see expected histogram bucket limit error: %s", err) + } + require.NoError(t, app.Rollback()) + require.Equal(t, 3, total) + require.Equal(t, 3, added) + require.Equal(t, 0, seriesAdded) + + err = sl.metrics.targetScrapeNativeHistogramBucketLimit.Write(&metric) + require.NoError(t, err) + metricValue = metric.GetCounter().GetValue() + require.Equal(t, beforeMetricValue+1, metricValue) // nolint +} + +func TestScrapeLoop_ChangingMetricString(t *testing.T) { + // This is a regression test for the scrape loop cache not properly maintaining + // IDs when the string representation of a metric changes across a scrape. Thus + // we use a real storage appender here. + s := teststorage.New(t) + defer s.Close() + + capp := &collectResultAppender{} + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return capp }, 0) + + now := time.Now() + slApp := sl.appender(context.Background()) + _, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1`), "", now) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + + slApp = sl.appender(context.Background()) + _, _, _, err = sl.append(slApp, []byte(`metric_a{b="1",a="1"} 2`), "", now.Add(time.Minute)) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + + want := []floatSample{ + { + metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"), + t: timestamp.FromTime(now), + f: 1, + }, + { + metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"), + t: timestamp.FromTime(now.Add(time.Minute)), + f: 2, + }, + } + require.Equal(t, want, capp.resultFloats, "Appended samples not as expected:\n%s", appender) +} + +func TestScrapeLoopAppendStaleness(t *testing.T) { + app := &collectResultAppender{} + + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + + now := time.Now() + slApp := sl.appender(context.Background()) + _, _, _, err := sl.append(slApp, []byte("metric_a 1\n"), "", now) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + + slApp = sl.appender(context.Background()) + _, _, _, err = sl.append(slApp, []byte(""), "", now.Add(time.Second)) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + + want := []floatSample{ + { + metric: labels.FromStrings(model.MetricNameLabel, "metric_a"), + t: timestamp.FromTime(now), + f: 1, + }, + { + metric: labels.FromStrings(model.MetricNameLabel, "metric_a"), + t: timestamp.FromTime(now.Add(time.Second)), + f: math.Float64frombits(value.StaleNaN), + }, + } + requireEqual(t, want, app.resultFloats, "Appended samples not as expected:\n%s", appender) +} + +func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) { + app := &collectResultAppender{} + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + now := time.Now() + slApp := 
sl.appender(context.Background()) + _, _, _, err := sl.append(slApp, []byte("metric_a 1 1000\n"), "", now) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + + slApp = sl.appender(context.Background()) + _, _, _, err = sl.append(slApp, []byte(""), "", now.Add(time.Second)) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + + want := []floatSample{ + { + metric: labels.FromStrings(model.MetricNameLabel, "metric_a"), + t: 1000, + f: 1, + }, + } + require.Equal(t, want, app.resultFloats, "Appended samples not as expected:\n%s", appender) +} + +func TestScrapeLoopAppendStalenessIfTrackTimestampStaleness(t *testing.T) { + app := &collectResultAppender{} + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl.trackTimestampsStaleness = true + + now := time.Now() + slApp := sl.appender(context.Background()) + _, _, _, err := sl.append(slApp, []byte("metric_a 1 1000\n"), "", now) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + + slApp = sl.appender(context.Background()) + _, _, _, err = sl.append(slApp, []byte(""), "", now.Add(time.Second)) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + + want := []floatSample{ + { + metric: labels.FromStrings(model.MetricNameLabel, "metric_a"), + t: 1000, + f: 1, + }, + { + metric: labels.FromStrings(model.MetricNameLabel, "metric_a"), + t: timestamp.FromTime(now.Add(time.Second)), + f: math.Float64frombits(value.StaleNaN), + }, + } + requireEqual(t, want, app.resultFloats, "Appended samples not as expected:\n%s", appender) +} + +func TestScrapeLoopAppendExemplar(t *testing.T) { + tests := []struct { + title string + scrapeClassicHistograms bool + enableNativeHistogramsIngestion bool + scrapeText string + contentType string + discoveryLabels []string + floats []floatSample + histograms []histogramSample + exemplars []exemplar.Exemplar + }{ + { + title: "Metric without exemplars", + scrapeText: "metric_total{n=\"1\"} 0\n# EOF", + contentType: "application/openmetrics-text", + discoveryLabels: []string{"n", "2"}, + floats: []floatSample{{ + metric: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"), + f: 0, + }}, + }, + { + title: "Metric with exemplars", + scrapeText: "metric_total{n=\"1\"} 0 # {a=\"abc\"} 1.0\n# EOF", + contentType: "application/openmetrics-text", + discoveryLabels: []string{"n", "2"}, + floats: []floatSample{{ + metric: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"), + f: 0, + }}, + exemplars: []exemplar.Exemplar{ + {Labels: labels.FromStrings("a", "abc"), Value: 1}, + }, + }, + { + title: "Metric with exemplars and TS", + scrapeText: "metric_total{n=\"1\"} 0 # {a=\"abc\"} 1.0 10000\n# EOF", + contentType: "application/openmetrics-text", + discoveryLabels: []string{"n", "2"}, + floats: []floatSample{{ + metric: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"), + f: 0, + }}, + exemplars: []exemplar.Exemplar{ + {Labels: labels.FromStrings("a", "abc"), Value: 1, Ts: 10000000, HasTs: true}, + }, + }, + { + title: "Two metrics and exemplars", + scrapeText: `metric_total{n="1"} 1 # {t="1"} 1.0 10000 +metric_total{n="2"} 2 # {t="2"} 2.0 20000 +# EOF`, + contentType: "application/openmetrics-text", + floats: []floatSample{{ + metric: labels.FromStrings("__name__", "metric_total", "n", "1"), + f: 1, + }, { + metric: labels.FromStrings("__name__", "metric_total", "n", "2"), + f: 2, + }}, + exemplars: []exemplar.Exemplar{ + {Labels: 
labels.FromStrings("t", "1"), Value: 1, Ts: 10000000, HasTs: true}, + {Labels: labels.FromStrings("t", "2"), Value: 2, Ts: 20000000, HasTs: true}, + }, + }, + { + title: "Native histogram with three exemplars", + + enableNativeHistogramsIngestion: true, + scrapeText: `name: "test_histogram" +help: "Test histogram with many buckets removed to keep it manageable in size." +type: HISTOGRAM +metric: < + histogram: < + sample_count: 175 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count: 2 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.0003899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "59727" + > + value: -0.00039 + timestamp: < + seconds: 1625851155 + nanos: 146848499 + > + > + > + bucket: < + cumulative_count: 16 + upper_bound: -0.0002899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "5617" + > + value: -0.00029 + > + > + bucket: < + cumulative_count: 32 + upper_bound: -0.0001899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "58215" + > + value: -0.00019 + timestamp: < + seconds: 1625851055 + nanos: 146848599 + > + > + > + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count: 2 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_delta: 1 + negative_delta: 3 + negative_delta: -2 + negative_delta: -1 + negative_delta: 1 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_delta: 1 + positive_delta: 2 + positive_delta: -1 + positive_delta: -1 + > + timestamp_ms: 1234568 +> + +`, + contentType: "application/vnd.google.protobuf", + histograms: []histogramSample{{ + t: 1234568, + h: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + }}, + exemplars: []exemplar.Exemplar{ + // Native histogram exemplars are arranged by timestamp, and those with missing timestamps are dropped. + {Labels: labels.FromStrings("dummyID", "58215"), Value: -0.00019, Ts: 1625851055146, HasTs: true}, + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true}, + }, + }, + { + title: "Native histogram with three exemplars scraped as classic histogram", + + enableNativeHistogramsIngestion: true, + scrapeText: `name: "test_histogram" +help: "Test histogram with many buckets removed to keep it manageable in size." 
+type: HISTOGRAM +metric: < + histogram: < + sample_count: 175 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count: 2 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.0003899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "59727" + > + value: -0.00039 + timestamp: < + seconds: 1625851155 + nanos: 146848499 + > + > + > + bucket: < + cumulative_count: 16 + upper_bound: -0.0002899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "5617" + > + value: -0.00029 + > + > + bucket: < + cumulative_count: 32 + upper_bound: -0.0001899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "58215" + > + value: -0.00019 + timestamp: < + seconds: 1625851055 + nanos: 146848599 + > + > + > + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count: 2 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_delta: 1 + negative_delta: 3 + negative_delta: -2 + negative_delta: -1 + negative_delta: 1 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_delta: 1 + positive_delta: 2 + positive_delta: -1 + positive_delta: -1 + > + timestamp_ms: 1234568 +> + +`, + scrapeClassicHistograms: true, + contentType: "application/vnd.google.protobuf", + floats: []floatSample{ + {metric: labels.FromStrings("__name__", "test_histogram_count"), t: 1234568, f: 175}, + {metric: labels.FromStrings("__name__", "test_histogram_sum"), t: 1234568, f: 0.0008280461746287094}, + {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0004899999999999998"), t: 1234568, f: 2}, + {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0003899999999999998"), t: 1234568, f: 4}, + {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0002899999999999998"), t: 1234568, f: 16}, + {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0001899999999999998"), t: 1234568, f: 32}, + {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"), t: 1234568, f: 175}, + }, + histograms: []histogramSample{{ + t: 1234568, + h: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + }}, + exemplars: []exemplar.Exemplar{ + // Native histogram one is arranged by timestamp. + // Exemplars with missing timestamps are dropped for native histograms. + {Labels: labels.FromStrings("dummyID", "58215"), Value: -0.00019, Ts: 1625851055146, HasTs: true}, + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true}, + // Classic histogram one is in order of appearance. + // Exemplars with missing timestamps are supported for classic histograms. 
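+				// A classic exemplar without its own timestamp inherits the sample's
+				// timestamp (1234568 here) and keeps HasTs=false.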
+ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, Ts: 1234568, HasTs: false}, + {Labels: labels.FromStrings("dummyID", "58215"), Value: -0.00019, Ts: 1625851055146, HasTs: true}, + }, + }, + } + + for _, test := range tests { + t.Run(test.title, func(t *testing.T) { + app := &collectResultAppender{} + + discoveryLabels := &Target{ + labels: labels.FromStrings(test.discoveryLabels...), + } + + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl.enableNativeHistogramIngestion = test.enableNativeHistogramsIngestion + sl.sampleMutator = func(l labels.Labels) labels.Labels { + return mutateSampleLabels(l, discoveryLabels, false, nil) + } + sl.reportSampleMutator = func(l labels.Labels) labels.Labels { + return mutateReportSampleLabels(l, discoveryLabels) + } + sl.scrapeClassicHistograms = test.scrapeClassicHistograms + + now := time.Now() + + for i := range test.floats { + if test.floats[i].t != 0 { + continue + } + test.floats[i].t = timestamp.FromTime(now) + } + + // We need to set the timestamp for expected exemplars that does not have a timestamp. + for i := range test.exemplars { + if test.exemplars[i].Ts == 0 { + test.exemplars[i].Ts = timestamp.FromTime(now) + } + } + + buf := &bytes.Buffer{} + if test.contentType == "application/vnd.google.protobuf" { + // In case of protobuf, we have to create the binary representation. + pb := &dto.MetricFamily{} + // From text to proto message. + require.NoError(t, proto.UnmarshalText(test.scrapeText, pb)) + // From proto message to binary protobuf. + protoBuf, err := proto.Marshal(pb) + require.NoError(t, err) + + // Write first length, then binary protobuf. 
+ varintBuf := binary.AppendUvarint(nil, uint64(len(protoBuf))) + buf.Write(varintBuf) + buf.Write(protoBuf) + } else { + buf.WriteString(test.scrapeText) + } + + _, _, _, err := sl.append(app, buf.Bytes(), test.contentType, now) + require.NoError(t, err) + require.NoError(t, app.Commit()) + requireEqual(t, test.floats, app.resultFloats) + requireEqual(t, test.histograms, app.resultHistograms) + requireEqual(t, test.exemplars, app.resultExemplars) + }) + } +} + +func TestScrapeLoopAppendExemplarSeries(t *testing.T) { + scrapeText := []string{`metric_total{n="1"} 1 # {t="1"} 1.0 10000 +# EOF`, `metric_total{n="1"} 2 # {t="2"} 2.0 20000 +# EOF`} + samples := []floatSample{{ + metric: labels.FromStrings("__name__", "metric_total", "n", "1"), + f: 1, + }, { + metric: labels.FromStrings("__name__", "metric_total", "n", "1"), + f: 2, + }} + exemplars := []exemplar.Exemplar{ + {Labels: labels.FromStrings("t", "1"), Value: 1, Ts: 10000000, HasTs: true}, + {Labels: labels.FromStrings("t", "2"), Value: 2, Ts: 20000000, HasTs: true}, + } + discoveryLabels := &Target{ + labels: labels.FromStrings(), + } + + app := &collectResultAppender{} + + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl.sampleMutator = func(l labels.Labels) labels.Labels { + return mutateSampleLabels(l, discoveryLabels, false, nil) + } + sl.reportSampleMutator = func(l labels.Labels) labels.Labels { + return mutateReportSampleLabels(l, discoveryLabels) + } + + now := time.Now() + + for i := range samples { + ts := now.Add(time.Second * time.Duration(i)) + samples[i].t = timestamp.FromTime(ts) + } + + // We need to set the timestamp for expected exemplars that does not have a timestamp. + for i := range exemplars { + if exemplars[i].Ts == 0 { + ts := now.Add(time.Second * time.Duration(i)) + exemplars[i].Ts = timestamp.FromTime(ts) + } + } + + for i, st := range scrapeText { + _, _, _, err := sl.append(app, []byte(st), "application/openmetrics-text", timestamp.Time(samples[i].t)) + require.NoError(t, err) + require.NoError(t, app.Commit()) + } + + requireEqual(t, samples, app.resultFloats) + requireEqual(t, exemplars, app.resultExemplars) +} + +func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) { + var ( + scraper = &scraperShim{} + appender = &collectResultAppender{} + app = func(ctx context.Context) storage.Appender { return appender } + ) + + ctx, cancel := context.WithCancel(context.Background()) + sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) + + scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { + cancel() + return errors.New("scrape failed") + } + + sl.run(nil) + require.Equal(t, 0.0, appender.resultFloats[0].f, "bad 'up' value") // nolint +} + +func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) { + var ( + scraper = &scraperShim{} + appender = &collectResultAppender{} + app = func(ctx context.Context) storage.Appender { return appender } + ) + + ctx, cancel := context.WithCancel(context.Background()) + sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) + + scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { + cancel() + _, _ = w.Write([]byte("a{l=\"\xff\"} 1\n")) + return nil + } + + sl.run(nil) + require.Equal(t, 0.0, appender.resultFloats[0].f, "bad 'up' value") // nolint +} + +type errorAppender struct { + collectResultAppender +} + +func (app *errorAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, 
error) { + switch lset.Get(model.MetricNameLabel) { + case "out_of_order": + return 0, storage.ErrOutOfOrderSample + case "amend": + return 0, storage.ErrDuplicateSampleForTimestamp + case "out_of_bounds": + return 0, storage.ErrOutOfBounds + default: + return app.collectResultAppender.Append(ref, lset, t, v) + } +} + +func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T) { + app := &errorAppender{} + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + + now := time.Unix(1, 0) + slApp := sl.appender(context.Background()) + total, added, seriesAdded, err := sl.append(slApp, []byte("out_of_order 1\namend 1\nnormal 1\nout_of_bounds 1\n"), "", now) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + + want := []floatSample{ + { + metric: labels.FromStrings(model.MetricNameLabel, "normal"), + t: timestamp.FromTime(now), + f: 1, + }, + } + requireEqual(t, want, app.resultFloats, "Appended samples not as expected:\n%s", appender) + require.Equal(t, 4, total) + require.Equal(t, 4, added) + require.Equal(t, 1, seriesAdded) +} + +func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) { + app := &collectResultAppender{} + sl := newBasicScrapeLoop(t, context.Background(), nil, + func(ctx context.Context) storage.Appender { + return &timeLimitAppender{ + Appender: app, + maxTime: timestamp.FromTime(time.Now().Add(10 * time.Minute)), + } + }, + 0, + ) + + now := time.Now().Add(20 * time.Minute) + slApp := sl.appender(context.Background()) + total, added, seriesAdded, err := sl.append(slApp, []byte("normal 1\n"), "", now) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + require.Equal(t, 1, total) + require.Equal(t, 1, added) + require.Equal(t, 0, seriesAdded) +} + +const useGathererHandler = true + +func newHTTPTestServer(handler http.Handler) *httptest.Server { + if useGathererHandler { + server := httptest.NewUnstartedServer(handler) + server.URL = "http://not-started:8080" + SetDefaultGathererHandler(handler) + return server + } + server := httptest.NewServer(handler) + SetDefaultGathererHandler(nil) + return server +} + +func TestTargetScraperScrapeOK(t *testing.T) { + const ( + configTimeout = 1500 * time.Millisecond + expectedTimeout = "1.5" + ) + + var protobufParsing bool + + server := newHTTPTestServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if protobufParsing { + accept := r.Header.Get("Accept") + assert.True(t, strings.HasPrefix(accept, "application/vnd.google.protobuf;"), + "Expected Accept header to prefer application/vnd.google.protobuf.") + } + + timeout := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds") + assert.Equal(t, expectedTimeout, timeout, "Expected scrape timeout header.") + + w.Header().Set("Content-Type", `text/plain; version=0.0.4`) + _, _ = w.Write([]byte("metric_a 1\nmetric_b 2\n")) + }), + ) + defer server.Close() + defer SetDefaultGathererHandler(nil) + + serverURL, err := url.Parse(server.URL) + if err != nil { + panic(err) + } + + runTest := func(acceptHeader string) { + ts := newScraper(&targetScraper{ + Target: &Target{ + labels: labels.FromStrings( + model.SchemeLabel, serverURL.Scheme, + model.AddressLabel, serverURL.Host, + ), + }, + client: http.DefaultClient, + timeout: configTimeout, + acceptHeader: acceptHeader, + }) + var buf bytes.Buffer + + resp, err := ts.scrape(context.Background()) + require.NoError(t, err) + contentType, err := ts.readResponse(context.Background(), resp, &buf) + require.NoError(t, err) 
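+		// readResponse also reports the response's Content-Type so the caller can
+		// pick the matching parser.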
+ require.Equal(t, "text/plain; version=0.0.4", contentType) + require.Equal(t, "metric_a 1\nmetric_b 2\n", buf.String()) + } + + runTest(acceptHeader(config.DefaultScrapeProtocols)) + protobufParsing = true + runTest(acceptHeader(config.DefaultProtoFirstScrapeProtocols)) +} + +func TestTargetScrapeScrapeCancel(t *testing.T) { + block := make(chan struct{}) + + server := newHTTPTestServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + <-block + }), + ) + defer server.Close() + defer SetDefaultGathererHandler(nil) + + serverURL, err := url.Parse(server.URL) + if err != nil { + panic(err) + } + + ts := newScraper(&targetScraper{ + Target: &Target{ + labels: labels.FromStrings( + model.SchemeLabel, serverURL.Scheme, + model.AddressLabel, serverURL.Host, + ), + }, + client: http.DefaultClient, + acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols), + }) + ctx, cancel := context.WithCancel(context.Background()) + + errc := make(chan error, 1) + + go func() { + time.Sleep(1 * time.Second) + cancel() + }() + + go func() { + _, err := ts.scrape(ctx) + switch { + case err == nil: + errc <- errors.New("Expected error but got nil") + case !errors.Is(ctx.Err(), context.Canceled): + errc <- fmt.Errorf("Expected context cancellation error but got: %w", ctx.Err()) + default: + close(errc) + } + }() + + select { + case <-time.After(5 * time.Second): + require.FailNow(t, "Scrape function did not return unexpectedly.") + case err := <-errc: + require.NoError(t, err) + } + // If this is closed in a defer above the function the test server + // doesn't terminate and the test doesn't complete. + close(block) +} + +func TestTargetScrapeScrapeNotFound(t *testing.T) { + server := newHTTPTestServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + }), + ) + defer server.Close() + defer SetDefaultGathererHandler(nil) + + serverURL, err := url.Parse(server.URL) + if err != nil { + panic(err) + } + + ts := newScraper(&targetScraper{ + Target: &Target{ + labels: labels.FromStrings( + model.SchemeLabel, serverURL.Scheme, + model.AddressLabel, serverURL.Host, + ), + }, + client: http.DefaultClient, + acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols), + }) + + resp, err := ts.scrape(context.Background()) + require.NoError(t, err) + _, err = ts.readResponse(context.Background(), resp, io.Discard) + require.Error(t, err) + require.Contains(t, err.Error(), "404", "Expected \"404 NotFound\" error but got: %s", err) +} + +func TestTargetScraperBodySizeLimit(t *testing.T) { + const ( + bodySizeLimit = 15 + responseBody = "metric_a 1\nmetric_b 2\n" + ) + var gzipResponse bool + server := newHTTPTestServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", `text/plain; version=0.0.4`) + if gzipResponse { + w.Header().Set("Content-Encoding", "gzip") + gw := gzip.NewWriter(w) + defer gw.Close() + _, _ = gw.Write([]byte(responseBody)) + return + } + _, _ = w.Write([]byte(responseBody)) + }), + ) + defer server.Close() + defer SetDefaultGathererHandler(nil) + + serverURL, err := url.Parse(server.URL) + if err != nil { + panic(err) + } + + ts := &targetScraper{ + Target: &Target{ + labels: labels.FromStrings( + model.SchemeLabel, serverURL.Scheme, + model.AddressLabel, serverURL.Host, + ), + }, + client: http.DefaultClient, + bodySizeLimit: bodySizeLimit, + acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols), + metrics: newTestScrapeMetrics(t), + } + s 
:= newScraper(ts)
+	var buf bytes.Buffer
+
+	// Target response uncompressed body, scrape with body size limit.
+	resp, err := s.scrape(context.Background())
+	require.NoError(t, err)
+	_, err = s.readResponse(context.Background(), resp, &buf)
+	require.ErrorIs(t, err, errBodySizeLimit)
+	require.Equal(t, bodySizeLimit, buf.Len())
+	// Target response gzip compressed body, scrape with body size limit.
+	gzipResponse = true
+	buf.Reset()
+	resp, err = s.scrape(context.Background())
+	require.NoError(t, err)
+	_, err = s.readResponse(context.Background(), resp, &buf)
+	require.ErrorIs(t, err, errBodySizeLimit)
+	require.Equal(t, bodySizeLimit, buf.Len())
+	// Target response uncompressed body, scrape without body size limit.
+	gzipResponse = false
+	buf.Reset()
+	ts.bodySizeLimit = 0
+	resp, err = s.scrape(context.Background())
+	require.NoError(t, err)
+	_, err = s.readResponse(context.Background(), resp, &buf)
+	require.NoError(t, err)
+	require.Len(t, responseBody, buf.Len())
+	// Target response gzip compressed body, scrape without body size limit.
+	gzipResponse = true
+	buf.Reset()
+	resp, err = s.scrape(context.Background())
+	require.NoError(t, err)
+	_, err = s.readResponse(context.Background(), resp, &buf)
+	require.NoError(t, err)
+	require.Len(t, responseBody, buf.Len())
+}
+
+func TestScrapeLoop_RespectTimestamps(t *testing.T) {
+	s := teststorage.New(t)
+	defer s.Close()
+
+	app := s.Appender(context.Background())
+	capp := &collectResultAppender{next: app}
+	sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return capp }, 0)
+
+	now := time.Now()
+	slApp := sl.appender(context.Background())
+	_, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1 0`), "", now)
+	require.NoError(t, err)
+	require.NoError(t, slApp.Commit())
+
+	want := []floatSample{
+		{
+			metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
+			t:      0,
+			f:      1,
+		},
+	}
+	require.Equal(t, want, capp.resultFloats, "Appended samples not as expected:\n%s", appender)
+}
+
+func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
+	s := teststorage.New(t)
+	defer s.Close()
+
+	app := s.Appender(context.Background())
+
+	capp := &collectResultAppender{next: app}
+
+	sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return capp }, 0)
+	sl.honorTimestamps = false
+
+	now := time.Now()
+	slApp := sl.appender(context.Background())
+	_, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1 0`), "", now)
+	require.NoError(t, err)
+	require.NoError(t, slApp.Commit())
+
+	want := []floatSample{
+		{
+			metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
+			t:      timestamp.FromTime(now),
+			f:      1,
+		},
+	}
+	require.Equal(t, want, capp.resultFloats, "Appended samples not as expected:\n%s", appender)
+}
+
+func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
+	s := teststorage.New(t)
+	defer s.Close()
+
+	ctx, cancel := context.WithCancel(context.Background())
+	sl := newBasicScrapeLoop(t, ctx, &scraperShim{}, s.Appender, 0)
+	defer cancel()
+
+	// We add a good and a bad metric to check that both are discarded.
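+	// The second sample repeats the "le" label, which is invalid and must fail the whole append.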
+ slApp := sl.appender(ctx) + _, _, _, err := sl.append(slApp, []byte("test_metric{le=\"500\"} 1\ntest_metric{le=\"600\",le=\"700\"} 1\n"), "", time.Time{}) + require.Error(t, err) + require.NoError(t, slApp.Rollback()) + // We need to cycle staleness cache maps after a manual rollback. Otherwise they will have old entries in them, + // which would cause ErrDuplicateSampleForTimestamp errors on the next append. + sl.cache.iterDone(true) + + q, err := s.Querier(time.Time{}.UnixNano(), 0) + require.NoError(t, err) + series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*")) + require.False(t, series.Next(), "series found in tsdb") + require.NoError(t, series.Err()) + + // We add a good metric to check that it is recorded. + slApp = sl.appender(ctx) + _, _, _, err = sl.append(slApp, []byte("test_metric{le=\"500\"} 1\n"), "", time.Time{}) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + + q, err = s.Querier(time.Time{}.UnixNano(), 0) + require.NoError(t, err) + series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "le", "500")) + require.True(t, series.Next(), "series not found in tsdb") + require.NoError(t, series.Err()) + require.False(t, series.Next(), "more than one series found in tsdb") +} + +func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) { + s := teststorage.New(t) + defer s.Close() + + app := s.Appender(context.Background()) + + ctx, cancel := context.WithCancel(context.Background()) + sl := newBasicScrapeLoop(t, context.Background(), &scraperShim{}, func(ctx context.Context) storage.Appender { return app }, 0) + sl.sampleMutator = func(l labels.Labels) labels.Labels { + if l.Has("drop") { + return labels.FromStrings("no", "name") // This label set will trigger an error. 
+ } + return l + } + defer cancel() + + slApp := sl.appender(context.Background()) + _, _, _, err := sl.append(slApp, []byte("nok 1\nnok2{drop=\"drop\"} 1\n"), "", time.Time{}) + require.Error(t, err) + require.NoError(t, slApp.Rollback()) + require.Equal(t, errNameLabelMandatory, err) + + q, err := s.Querier(time.Time{}.UnixNano(), 0) + require.NoError(t, err) + series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*")) + require.False(t, series.Next(), "series found in tsdb") + require.NoError(t, series.Err()) +} + +func TestReusableConfig(t *testing.T) { + variants := []*config.ScrapeConfig{ + { + JobName: "prometheus", + ScrapeTimeout: model.Duration(15 * time.Second), + }, + { + JobName: "httpd", + ScrapeTimeout: model.Duration(15 * time.Second), + }, + { + JobName: "prometheus", + ScrapeTimeout: model.Duration(5 * time.Second), + }, + { + JobName: "prometheus", + MetricsPath: "/metrics", + }, + { + JobName: "prometheus", + MetricsPath: "/metrics2", + }, + { + JobName: "prometheus", + ScrapeTimeout: model.Duration(5 * time.Second), + MetricsPath: "/metrics2", + }, + { + JobName: "prometheus", + ScrapeInterval: model.Duration(5 * time.Second), + MetricsPath: "/metrics2", + }, + { + JobName: "prometheus", + ScrapeInterval: model.Duration(5 * time.Second), + SampleLimit: 1000, + MetricsPath: "/metrics2", + }, + } + + match := [][]int{ + {0, 2}, + {4, 5}, + {4, 6}, + {4, 7}, + {5, 6}, + {5, 7}, + {6, 7}, + } + noMatch := [][]int{ + {1, 2}, + {0, 4}, + {3, 4}, + } + + for i, m := range match { + require.True(t, reusableCache(variants[m[0]], variants[m[1]]), "match test %d", i) + require.True(t, reusableCache(variants[m[1]], variants[m[0]]), "match test %d", i) + require.True(t, reusableCache(variants[m[1]], variants[m[1]]), "match test %d", i) + require.True(t, reusableCache(variants[m[0]], variants[m[0]]), "match test %d", i) + } + for i, m := range noMatch { + require.False(t, reusableCache(variants[m[0]], variants[m[1]]), "not match test %d", i) + require.False(t, reusableCache(variants[m[1]], variants[m[0]]), "not match test %d", i) + } +} + +func TestScrapeAddFast(t *testing.T) { + s := teststorage.New(t) + defer s.Close() + + ctx, cancel := context.WithCancel(context.Background()) + sl := newBasicScrapeLoop(t, ctx, &scraperShim{}, s.Appender, 0) + defer cancel() + + slApp := sl.appender(ctx) + _, _, _, err := sl.append(slApp, []byte("up 1\n"), "", time.Time{}) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + + // Poison the cache. There is just one entry, and one series in the + // storage. Changing the ref will create a 'not found' error. 
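+	// The next append must recover transparently by re-adding the series
+	// instead of surfacing a 'not found' error.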
+ for _, v := range sl.getCache().series { + v.ref++ + } + + slApp = sl.appender(ctx) + _, _, _, err = sl.append(slApp, []byte("up 1\n"), "", time.Time{}.Add(time.Second)) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) +} + +func TestCheckAddError(t *testing.T) { + var appErrs appendErrors + sl := scrapeLoop{l: log.NewNopLogger(), metrics: newTestScrapeMetrics(t)} + _, _ = sl.checkAddError(nil, storage.ErrOutOfOrderSample, nil, nil, &appErrs) + require.Equal(t, 1, appErrs.numOutOfOrder) +} + +func TestScrapeReportSingleAppender(t *testing.T) { + s := teststorage.New(t) + defer s.Close() + + var ( + signal = make(chan struct{}, 1) + scraper = &scraperShim{} + ) + + ctx, cancel := context.WithCancel(context.Background()) + sl := newBasicScrapeLoop(t, ctx, scraper, s.Appender, 10*time.Millisecond) + + numScrapes := 0 + + scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { + numScrapes++ + if numScrapes%4 == 0 { + return errors.New("scrape failed") + } + _, _ = w.Write([]byte("metric_a 44\nmetric_b 44\nmetric_c 44\nmetric_d 44\n")) + return nil + } + + go func() { + sl.run(nil) + signal <- struct{}{} + }() + + start := time.Now() + for time.Since(start) < 3*time.Second { + q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano()) + require.NoError(t, err) + series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".+")) + + c := 0 + for series.Next() { + i := series.At().Iterator(nil) + for i.Next() != chunkenc.ValNone { + c++ + } + } + + require.Equal(t, 0, c%9, "Appended samples not as expected: %d", c) + q.Close() + } + cancel() + + select { + case <-signal: + case <-time.After(5 * time.Second): + require.FailNow(t, "Scrape wasn't stopped.") + } +} + +func TestScrapeLoopLabelLimit(t *testing.T) { + tests := []struct { + title string + scrapeLabels string + discoveryLabels []string + labelLimits labelLimits + expectErr bool + }{ + { + title: "Valid number of labels", + scrapeLabels: `metric{l1="1", l2="2"} 0`, + discoveryLabels: nil, + labelLimits: labelLimits{labelLimit: 5}, + expectErr: false, + }, { + title: "Too many labels", + scrapeLabels: `metric{l1="1", l2="2", l3="3", l4="4", l5="5", l6="6"} 0`, + discoveryLabels: nil, + labelLimits: labelLimits{labelLimit: 5}, + expectErr: true, + }, { + title: "Too many labels including discovery labels", + scrapeLabels: `metric{l1="1", l2="2", l3="3", l4="4"} 0`, + discoveryLabels: []string{"l5", "5", "l6", "6"}, + labelLimits: labelLimits{labelLimit: 5}, + expectErr: true, + }, { + title: "Valid labels name length", + scrapeLabels: `metric{l1="1", l2="2"} 0`, + discoveryLabels: nil, + labelLimits: labelLimits{labelNameLengthLimit: 10}, + expectErr: false, + }, { + title: "Label name too long", + scrapeLabels: `metric{label_name_too_long="0"} 0`, + discoveryLabels: nil, + labelLimits: labelLimits{labelNameLengthLimit: 10}, + expectErr: true, + }, { + title: "Discovery label name too long", + scrapeLabels: `metric{l1="1", l2="2"} 0`, + discoveryLabels: []string{"label_name_too_long", "0"}, + labelLimits: labelLimits{labelNameLengthLimit: 10}, + expectErr: true, + }, { + title: "Valid labels value length", + scrapeLabels: `metric{l1="1", l2="2"} 0`, + discoveryLabels: nil, + labelLimits: labelLimits{labelValueLengthLimit: 10}, + expectErr: false, + }, { + title: "Label value too long", + scrapeLabels: `metric{l1="label_value_too_long"} 0`, + discoveryLabels: nil, + labelLimits: labelLimits{labelValueLengthLimit: 10}, + expectErr: true, + }, { + title: "Discovery label value 
too long", + scrapeLabels: `metric{l1="1", l2="2"} 0`, + discoveryLabels: []string{"l1", "label_value_too_long"}, + labelLimits: labelLimits{labelValueLengthLimit: 10}, + expectErr: true, + }, + } + + for _, test := range tests { + app := &collectResultAppender{} + + discoveryLabels := &Target{ + labels: labels.FromStrings(test.discoveryLabels...), + } + + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl.sampleMutator = func(l labels.Labels) labels.Labels { + return mutateSampleLabels(l, discoveryLabels, false, nil) + } + sl.reportSampleMutator = func(l labels.Labels) labels.Labels { + return mutateReportSampleLabels(l, discoveryLabels) + } + sl.labelLimits = &test.labelLimits + + slApp := sl.appender(context.Background()) + _, _, _, err := sl.append(slApp, []byte(test.scrapeLabels), "", time.Now()) + + t.Logf("Test:%s", test.title) + if test.expectErr { + require.Error(t, err) + } else { + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + } + } +} + +// Testing whether we can remove trailing .0 from histogram 'le' and summary 'quantile' labels. + +func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t *testing.T) { + appender := &collectResultAppender{} + var ( + signal = make(chan struct{}, 1) + scraper = &scraperShim{} + app = func(ctx context.Context) storage.Appender { return appender } + ) + + ctx, cancel := context.WithCancel(context.Background()) + sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) + sl.trackTimestampsStaleness = true + // Succeed once, several failures, then stop. + numScrapes := 0 + + scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { + numScrapes++ + + switch numScrapes { + case 1: + _, _ = w.Write([]byte(fmt.Sprintf("metric_a 42 %d\n", time.Now().UnixNano()/int64(time.Millisecond)))) + return nil + case 5: + cancel() + } + return errors.New("scrape failed") + } + + go func() { + sl.run(nil) + signal <- struct{}{} + }() + + select { + case <-signal: + case <-time.After(5 * time.Second): + t.Fatalf("Scrape wasn't stopped.") + } + + // 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for + // each scrape successful or not. + require.Len(t, appender.resultFloats, 27, "Appended samples not as expected:\n%s", appender) + require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected") // nolint + require.True(t, value.IsStaleNaN(appender.resultFloats[6].f), + "Appended second sample not as expected. 
Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f)) +} + +func TestPickSchema(t *testing.T) { + tcs := []struct { + factor float64 + schema int32 + }{ + { + factor: 65536, + schema: -4, + }, + { + factor: 256, + schema: -3, + }, + { + factor: 16, + schema: -2, + }, + { + factor: 4, + schema: -1, + }, + { + factor: 2, + schema: 0, + }, + { + factor: 1.4, + schema: 1, + }, + { + factor: 1.1, + schema: 2, + }, + { + factor: 1.09, + schema: 3, + }, + { + factor: 1.04, + schema: 4, + }, + { + factor: 1.02, + schema: 5, + }, + { + factor: 1.01, + schema: 6, + }, + { + factor: 1.005, + schema: 7, + }, + { + factor: 1.002, + schema: 8, + }, + // The default value of native_histogram_min_bucket_factor + { + factor: 0, + schema: 8, + }, + } + + for _, tc := range tcs { + schema := pickSchema(tc.factor) + require.Equal(t, tc.schema, schema) + } +} + +func BenchmarkTargetScraperGzip(b *testing.B) { + scenarios := []struct { + metricsCount int + body []byte + }{ + {metricsCount: 1}, + {metricsCount: 100}, + {metricsCount: 1000}, + {metricsCount: 10000}, + {metricsCount: 100000}, + } + + for i := 0; i < len(scenarios); i++ { + var buf bytes.Buffer + var name string + gw := gzip.NewWriter(&buf) + for j := 0; j < scenarios[i].metricsCount; j++ { + name = fmt.Sprintf("go_memstats_alloc_bytes_total_%d", j) + fmt.Fprintf(gw, "# HELP %s Total number of bytes allocated, even if freed.\n", name) + fmt.Fprintf(gw, "# TYPE %s counter\n", name) + fmt.Fprintf(gw, "%s %d\n", name, i*j) + } + gw.Close() + scenarios[i].body = buf.Bytes() + } + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", `text/plain; version=0.0.4`) + w.Header().Set("Content-Encoding", "gzip") + for _, scenario := range scenarios { + if strconv.Itoa(scenario.metricsCount) == r.URL.Query()["count"][0] { + w.Write(scenario.body) // nolint + return + } + } + w.WriteHeader(http.StatusBadRequest) + }) + + server := httptest.NewServer(handler) + defer server.Close() + + serverURL, err := url.Parse(server.URL) + if err != nil { + panic(err) + } + + client, err := config_util.NewClientFromConfig(config_util.DefaultHTTPClientConfig, "test_job") + if err != nil { + panic(err) + } + + for _, scenario := range scenarios { + b.Run(fmt.Sprintf("metrics=%d", scenario.metricsCount), func(b *testing.B) { + ts := newScraper(&targetScraper{ + Target: &Target{ + labels: labels.FromStrings( + model.SchemeLabel, serverURL.Scheme, + model.AddressLabel, serverURL.Host, + ), + params: url.Values{"count": []string{strconv.Itoa(scenario.metricsCount)}}, + }, + client: client, + timeout: time.Second, + }) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err = ts.scrape(context.Background()) + require.NoError(b, err) + } + }) + } +} + +// When a scrape contains multiple instances for the same time series we should increment +// prometheus_target_scrapes_sample_duplicate_timestamp_total metric. 
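+// Samples for the same series only count as duplicates when they carry the same
+// (implicit or explicit) timestamp; with distinct timestamps they are accepted.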
+func TestScrapeLoopSeriesAddedDuplicates(t *testing.T) { + ctx, sl := simpleTestScrapeLoop(t) + + slApp := sl.appender(ctx) + total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\ntest_metric 2\ntest_metric 3\n"), "", time.Time{}) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + require.Equal(t, 3, total) + require.Equal(t, 3, added) + require.Equal(t, 1, seriesAdded) + require.Equal(t, 2.0, prom_testutil.ToFloat64(sl.metrics.targetScrapeSampleDuplicate)) // nolint + + slApp = sl.appender(ctx) + total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1\ntest_metric 1\ntest_metric 1\n"), "", time.Time{}) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + require.Equal(t, 3, total) + require.Equal(t, 3, added) + require.Equal(t, 0, seriesAdded) + require.Equal(t, 4.0, prom_testutil.ToFloat64(sl.metrics.targetScrapeSampleDuplicate)) // nolint + + // When different timestamps are supplied, multiple samples are accepted. + slApp = sl.appender(ctx) + total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1 1001\ntest_metric 1 1002\ntest_metric 1 1003\n"), "", time.Time{}) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + require.Equal(t, 3, total) + require.Equal(t, 3, added) + require.Equal(t, 0, seriesAdded) + // Metric is not higher than last time. + require.Equal(t, 4.0, prom_testutil.ToFloat64(sl.metrics.targetScrapeSampleDuplicate)) // nolint +} diff --git a/pkg/promotel/internal/prometheus/scrape/target.go b/pkg/promotel/internal/prometheus/scrape/target.go new file mode 100644 index 0000000000..e53da36a8e --- /dev/null +++ b/pkg/promotel/internal/prometheus/scrape/target.go @@ -0,0 +1,565 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scrape + +import ( + "errors" + "fmt" + "hash/fnv" + "net" + "net/url" + "strings" + "sync" + "time" + + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/relabel" + "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/storage" +) + +// TargetHealth describes the health state of a target. +type TargetHealth string + +// The possible health states of a target based on the last performed scrape. +const ( + HealthUnknown TargetHealth = "unknown" + HealthGood TargetHealth = "up" + HealthBad TargetHealth = "down" +) + +// Target refers to a singular HTTP or HTTPS endpoint. +type Target struct { + // Labels before any processing. + discoveredLabels labels.Labels + // Any labels that are added to this target and its metrics. + labels labels.Labels + // Additional URL parameters that are part of the target URL. 
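+	// They are merged with __param_<name> labels in URL(): a label value
+	// replaces the first value of a matching key, so relabeling can override
+	// individual query parameters.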
+ params url.Values + + mtx sync.RWMutex + lastError error + lastScrape time.Time + lastScrapeDuration time.Duration + health TargetHealth + metadata MetricMetadataStore +} + +// NewTarget creates a reasonably configured target for querying. +func NewTarget(labels, discoveredLabels labels.Labels, params url.Values) *Target { + return &Target{ + labels: labels, + discoveredLabels: discoveredLabels, + params: params, + health: HealthUnknown, + } +} + +func (t *Target) String() string { + return t.URL().String() +} + +// MetricMetadataStore represents a storage for metadata. +type MetricMetadataStore interface { + ListMetadata() []MetricMetadata + GetMetadata(metric string) (MetricMetadata, bool) + SizeMetadata() int + LengthMetadata() int +} + +// MetricMetadata is a piece of metadata for a metric. +type MetricMetadata struct { + Metric string + Type model.MetricType + Help string + Unit string +} + +func (t *Target) ListMetadata() []MetricMetadata { + t.mtx.RLock() + defer t.mtx.RUnlock() + + if t.metadata == nil { + return nil + } + return t.metadata.ListMetadata() +} + +func (t *Target) SizeMetadata() int { + t.mtx.RLock() + defer t.mtx.RUnlock() + + if t.metadata == nil { + return 0 + } + + return t.metadata.SizeMetadata() +} + +func (t *Target) LengthMetadata() int { + t.mtx.RLock() + defer t.mtx.RUnlock() + + if t.metadata == nil { + return 0 + } + + return t.metadata.LengthMetadata() +} + +// GetMetadata returns type and help metadata for the given metric. +func (t *Target) GetMetadata(metric string) (MetricMetadata, bool) { + t.mtx.RLock() + defer t.mtx.RUnlock() + + if t.metadata == nil { + return MetricMetadata{}, false + } + return t.metadata.GetMetadata(metric) +} + +func (t *Target) SetMetadataStore(s MetricMetadataStore) { + t.mtx.Lock() + defer t.mtx.Unlock() + t.metadata = s +} + +// hash returns an identifying hash for the target. +func (t *Target) hash() uint64 { + h := fnv.New64a() + + h.Write([]byte(fmt.Sprintf("%016d", t.labels.Hash()))) + h.Write([]byte(t.URL().String())) + + return h.Sum64() +} + +// offset returns the time until the next scrape cycle for the target. +// It includes the global server offsetSeed for scrapes from multiple Prometheus to try to be at different times. +func (t *Target) offset(interval time.Duration, offsetSeed uint64) time.Duration { + now := time.Now().UnixNano() + + // Base is a pinned to absolute time, no matter how often offset is called. + var ( + base = int64(interval) - now%int64(interval) + offset = (t.hash() ^ offsetSeed) % uint64(interval) // nolint + next = base + int64(offset) // nolint + ) + + if next > int64(interval) { + next -= int64(interval) + } + return time.Duration(next) +} + +// Labels returns a copy of the set of all public labels of the target. +func (t *Target) Labels(b *labels.ScratchBuilder) labels.Labels { + b.Reset() + t.labels.Range(func(l labels.Label) { + if !strings.HasPrefix(l.Name, model.ReservedLabelPrefix) { + b.Add(l.Name, l.Value) + } + }) + return b.Labels() +} + +// LabelsRange calls f on each public label of the target. +func (t *Target) LabelsRange(f func(l labels.Label)) { + t.labels.Range(func(l labels.Label) { + if !strings.HasPrefix(l.Name, model.ReservedLabelPrefix) { + f(l) + } + }) +} + +// DiscoveredLabels returns a copy of the target's labels before any processing. +func (t *Target) DiscoveredLabels() labels.Labels { + t.mtx.Lock() + defer t.mtx.Unlock() + return t.discoveredLabels.Copy() +} + +// SetDiscoveredLabels sets new DiscoveredLabels. 
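+
+// A small sketch of the jitter scheme implemented by offset above (assumed
+// semantics, for illustration only): each target hashes to a fixed phase
+// within the scrape interval, so a fleet of targets spreads its scrapes
+// across the interval instead of stampeding at interval boundaries.
+func exampleScrapePhase(hash, offsetSeed uint64, interval time.Duration) time.Duration {
+	// The phase is stable for a given target and seed, and roughly uniform
+	// across targets thanks to the hash.
+	return time.Duration((hash ^ offsetSeed) % uint64(interval))
+}
+
+// The labels are stored without copying, so the caller must not mutate l
+// afterwards; readers obtain their own copy via DiscoveredLabels above.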
+func (t *Target) SetDiscoveredLabels(l labels.Labels) { + t.mtx.Lock() + defer t.mtx.Unlock() + t.discoveredLabels = l +} + +// URL returns a copy of the target's URL. +func (t *Target) URL() *url.URL { + params := url.Values{} + + for k, v := range t.params { + params[k] = make([]string, len(v)) + copy(params[k], v) + } + t.labels.Range(func(l labels.Label) { + if !strings.HasPrefix(l.Name, model.ParamLabelPrefix) { + return + } + ks := l.Name[len(model.ParamLabelPrefix):] + + if len(params[ks]) > 0 { + params[ks][0] = l.Value + } else { + params[ks] = []string{l.Value} + } + }) + + return &url.URL{ + Scheme: t.labels.Get(model.SchemeLabel), + Host: t.labels.Get(model.AddressLabel), + Path: t.labels.Get(model.MetricsPathLabel), + RawQuery: params.Encode(), + } +} + +// Report sets target data about the last scrape. +func (t *Target) Report(start time.Time, dur time.Duration, err error) { + t.mtx.Lock() + defer t.mtx.Unlock() + + if err == nil { + t.health = HealthGood + } else { + t.health = HealthBad + } + + t.lastError = err + t.lastScrape = start + t.lastScrapeDuration = dur +} + +// LastError returns the error encountered during the last scrape. +func (t *Target) LastError() error { + t.mtx.RLock() + defer t.mtx.RUnlock() + + return t.lastError +} + +// LastScrape returns the time of the last scrape. +func (t *Target) LastScrape() time.Time { + t.mtx.RLock() + defer t.mtx.RUnlock() + + return t.lastScrape +} + +// LastScrapeDuration returns how long the last scrape of the target took. +func (t *Target) LastScrapeDuration() time.Duration { + t.mtx.RLock() + defer t.mtx.RUnlock() + + return t.lastScrapeDuration +} + +// Health returns the last known health state of the target. +func (t *Target) Health() TargetHealth { + t.mtx.RLock() + defer t.mtx.RUnlock() + + return t.health +} + +// GetValue gets a label value from the entire label set. +func (t *Target) GetValue(name string) string { + return t.labels.Get(name) +} + +// Targets is a sortable list of targets. +type Targets []*Target + +func (ts Targets) Len() int { return len(ts) } +func (ts Targets) Less(i, j int) bool { return ts[i].URL().String() < ts[j].URL().String() } +func (ts Targets) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] } + +var ( + errSampleLimit = errors.New("sample limit exceeded") + errBucketLimit = errors.New("histogram bucket limit exceeded") +) + +// limitAppender limits the number of total appended samples in a batch. +type limitAppender struct { + storage.Appender + + limit int + i int +} + +func (app *limitAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { + if !value.IsStaleNaN(v) { + app.i++ + if app.i > app.limit { + return 0, errSampleLimit + } + } + ref, err := app.Appender.Append(ref, lset, t, v) + if err != nil { + return 0, err + } + return ref, nil +} + +type timeLimitAppender struct { + storage.Appender + + maxTime int64 +} + +func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { + if t > app.maxTime { + return 0, storage.ErrOutOfBounds + } + + ref, err := app.Appender.Append(ref, lset, t, v) + if err != nil { + return 0, err + } + return ref, nil +} + +// bucketLimitAppender limits the number of total appended samples in a batch. 
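+// More precisely, the limit applies to the number of buckets in a single
+// native-histogram sample: AppendHistogram below repeatedly reduces a
+// histogram's resolution (decrementing its schema) until it fits under the
+// limit, and only returns errBucketLimit once no further reduction is
+// possible.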
+type bucketLimitAppender struct { + storage.Appender + + limit int +} + +func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + if h != nil { + // Return with an early error if the histogram has too many buckets and the + // schema is not exponential, in which case we can't reduce the resolution. + if len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit && !histogram.IsExponentialSchema(h.Schema) { + return 0, errBucketLimit + } + for len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit { + if h.Schema <= histogram.ExponentialSchemaMin { + return 0, errBucketLimit + } + h = h.ReduceResolution(h.Schema - 1) + } + } + if fh != nil { + // Return with an early error if the histogram has too many buckets and the + // schema is not exponential, in which case we can't reduce the resolution. + if len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit && !histogram.IsExponentialSchema(fh.Schema) { + return 0, errBucketLimit + } + for len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit { + if fh.Schema <= histogram.ExponentialSchemaMin { + return 0, errBucketLimit + } + fh = fh.ReduceResolution(fh.Schema - 1) + } + } + ref, err := app.Appender.AppendHistogram(ref, lset, t, h, fh) + if err != nil { + return 0, err + } + return ref, nil +} + +type maxSchemaAppender struct { + storage.Appender + + maxSchema int32 +} + +func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + if h != nil { + if histogram.IsExponentialSchema(h.Schema) && h.Schema > app.maxSchema { + h = h.ReduceResolution(app.maxSchema) + } + } + if fh != nil { + if histogram.IsExponentialSchema(fh.Schema) && fh.Schema > app.maxSchema { + fh = fh.ReduceResolution(app.maxSchema) + } + } + ref, err := app.Appender.AppendHistogram(ref, lset, t, h, fh) + if err != nil { + return 0, err + } + return ref, nil +} + +// PopulateLabels builds a label set from the given label set and scrape configuration. +// It returns a label set before relabeling was applied as the second return value. +// Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling. +func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort bool) (res, orig labels.Labels, err error) { + // Copy labels into the labelset for the target if they are not set already. + scrapeLabels := []labels.Label{ + {Name: model.JobLabel, Value: cfg.JobName}, + {Name: model.ScrapeIntervalLabel, Value: cfg.ScrapeInterval.String()}, + {Name: model.ScrapeTimeoutLabel, Value: cfg.ScrapeTimeout.String()}, + {Name: model.MetricsPathLabel, Value: cfg.MetricsPath}, + {Name: model.SchemeLabel, Value: cfg.Scheme}, + } + + for _, l := range scrapeLabels { + if lb.Get(l.Name) == "" { + lb.Set(l.Name, l.Value) + } + } + // Encode scrape query parameters as labels. + for k, v := range cfg.Params { + if len(v) > 0 { + lb.Set(model.ParamLabelPrefix+k, v[0]) + } + } + + preRelabelLabels := lb.Labels() + keep := relabel.ProcessBuilder(lb, cfg.RelabelConfigs...) + + // Check if the target was dropped. 
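+	// ProcessBuilder reports keep=false as soon as a relabel rule drops the
+	// target; the pre-relabel label set is still handed back so callers can
+	// surface dropped targets (e.g. on a service-discovery status page).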
+ if !keep { + return labels.EmptyLabels(), preRelabelLabels, nil + } + if v := lb.Get(model.AddressLabel); v == "" { + return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("no address") + } + + // addPort checks whether we should add a default port to the address. + // If the address is not valid, we don't append a port either. + addPort := func(s string) (string, string, bool) { + // If we can split, a port exists and we don't have to add one. + if host, port, err := net.SplitHostPort(s); err == nil { // nolint + return host, port, false + } + // If adding a port makes it valid, the previous error + // was not due to an invalid address and we can append a port. + _, _, e := net.SplitHostPort(s + ":1234") + return "", "", e == nil + } + + addr := lb.Get(model.AddressLabel) + scheme := lb.Get(model.SchemeLabel) + host, port, add := addPort(addr) + // If it's an address with no trailing port, infer it based on the used scheme + // unless the no-default-scrape-port feature flag is present. + if !noDefaultPort && add { + // Addresses reaching this point are already wrapped in [] if necessary. + switch scheme { + case "http", "": + addr += ":80" + case "https": + addr += ":443" + default: + return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("invalid scheme: %q", cfg.Scheme) + } + lb.Set(model.AddressLabel, addr) + } + + if noDefaultPort { + // If it's an address with a trailing default port and the + // no-default-scrape-port flag is present, remove the port. + switch port { + case "80": + if scheme == "http" { + lb.Set(model.AddressLabel, host) + } + case "443": + if scheme == "https" { + lb.Set(model.AddressLabel, host) + } + } + } + + if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { // nolint + return labels.EmptyLabels(), labels.EmptyLabels(), err + } + + interval := lb.Get(model.ScrapeIntervalLabel) + intervalDuration, err := model.ParseDuration(interval) + if err != nil { + return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("error parsing scrape interval: %w", err) + } + if time.Duration(intervalDuration) == 0 { + return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("scrape interval cannot be 0") + } + + timeout := lb.Get(model.ScrapeTimeoutLabel) + timeoutDuration, err := model.ParseDuration(timeout) + if err != nil { + return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("error parsing scrape timeout: %w", err) + } + if time.Duration(timeoutDuration) == 0 { + return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("scrape timeout cannot be 0") + } + + if timeoutDuration > intervalDuration { + return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("scrape timeout cannot be greater than scrape interval (%q > %q)", timeout, interval) + } + + // Meta labels are deleted after relabelling. Other internal labels propagate to + // the target which decides whether they will be part of their label set. + lb.Range(func(l labels.Label) { + if strings.HasPrefix(l.Name, model.MetaLabelPrefix) { + lb.Del(l.Name) + } + }) + + // Default the instance label to the target address. + if v := lb.Get(model.InstanceLabel); v == "" { + lb.Set(model.InstanceLabel, addr) + } + + res = lb.Labels() + err = res.Validate(func(l labels.Label) error { + // Check label values are valid, drop the target if not. 
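+		// For label values, validity only means valid UTF-8 encoding (see
+		// model.LabelValue.IsValid); no further character restrictions apply.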
+ if !model.LabelValue(l.Value).IsValid() { + return fmt.Errorf("invalid label value for %q: %q", l.Name, l.Value) + } + return nil + }) + if err != nil { + return labels.EmptyLabels(), labels.EmptyLabels(), err + } + return res, preRelabelLabels, nil +} + +// TargetsFromGroup builds targets based on the given TargetGroup and config. +func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefaultPort bool, targets []*Target, lb *labels.Builder) ([]*Target, []error) { + targets = targets[:0] + failures := []error{} + + for i, tlset := range tg.Targets { + lb.Reset(labels.EmptyLabels()) + + for ln, lv := range tlset { + lb.Set(string(ln), string(lv)) + } + for ln, lv := range tg.Labels { + if _, ok := tlset[ln]; !ok { + lb.Set(string(ln), string(lv)) + } + } + + lset, origLabels, err := PopulateLabels(lb, cfg, noDefaultPort) + if err != nil { + failures = append(failures, fmt.Errorf("instance %d in group %s: %w", i, tg, err)) + } + if !lset.IsEmpty() || !origLabels.IsEmpty() { + targets = append(targets, NewTarget(lset, origLabels, cfg.Params)) + } + } + return targets, failures +} diff --git a/pkg/promotel/internal/prometheus/scrape/testdata/bearertoken.txt b/pkg/promotel/internal/prometheus/scrape/testdata/bearertoken.txt new file mode 100644 index 0000000000..e56e15bb7d --- /dev/null +++ b/pkg/promotel/internal/prometheus/scrape/testdata/bearertoken.txt @@ -0,0 +1 @@ +12345 diff --git a/pkg/promotel/internal/prometheus/scrape/testdata/ca.cer b/pkg/promotel/internal/prometheus/scrape/testdata/ca.cer new file mode 100644 index 0000000000..86f627a903 --- /dev/null +++ b/pkg/promotel/internal/prometheus/scrape/testdata/ca.cer @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDkTCCAnmgAwIBAgIJAJNsnimNN3tmMA0GCSqGSIb3DQEBCwUAMF8xCzAJBgNV +BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg +Q29tcGFueSBMdGQxGzAZBgNVBAMMElByb21ldGhldXMgVGVzdCBDQTAeFw0xNTA4 +MDQxNDA5MjFaFw0yNTA4MDExNDA5MjFaMF8xCzAJBgNVBAYTAlhYMRUwEwYDVQQH +DAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxGzAZ +BgNVBAMMElByb21ldGhldXMgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAOlSBU3yWpUELbhzizznR0hnAL7dbEHzfEtEc6N3PoSvMNcqrUVq +t4kjBRWzqkZ5uJVkzBPERKEBoOI9pWcrqtMTBkMzHJY2Ep7GHTab10e9KC2IFQT6 +FKP/jCYixaIVx3azEfajRJooD8r79FGoagWUfHdHyCFWJb/iLt8z8+S91kelSRMS +yB9M1ypWomzBz1UFXZp1oiNO5o7/dgXW4MgLUfC2obJ9j5xqpc6GkhWMW4ZFwEr/ +VLjuzxG9B8tLfQuhnXKGn1W8+WzZVWCWMD/sLfZfmjKaWlwcXzL51g8E+IEIBJqV +w51aMI6lDkcvAM7gLq1auLZMVXyKWSKw7XMCAwEAAaNQME4wHQYDVR0OBBYEFMz1 +BZnlqxJp2HiJSjHK8IsLrWYbMB8GA1UdIwQYMBaAFMz1BZnlqxJp2HiJSjHK8IsL +rWYbMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAI2iA3w3TK5J15Pu +e4fPFB4jxQqsbUwuyXbCCv/jKLeFNCD4BjM181WZEYjPMumeTBVzU3aF45LWQIG1 +0DJcrCL4mjMz9qgAoGqA7aDDXiJGbukMgYYsn7vrnVmrZH8T3E8ySlltr7+W578k +pJ5FxnbCroQwn0zLyVB3sFbS8E3vpBr3L8oy8PwPHhIScexcNVc3V6/m4vTZsXTH +U+vUm1XhDgpDcFMTg2QQiJbfpOYUkwIgnRDAT7t282t2KQWtnlqc3zwPQ1F/6Cpx +j19JeNsaF1DArkD7YlyKj/GhZLtHwFHG5cxznH0mLDJTW7bQvqqh2iQTeXmBk1lU +mM5lH/s= +-----END CERTIFICATE----- diff --git a/pkg/promotel/internal/prometheus/scrape/testdata/ca.key b/pkg/promotel/internal/prometheus/scrape/testdata/ca.key new file mode 100644 index 0000000000..1db2600376 --- /dev/null +++ b/pkg/promotel/internal/prometheus/scrape/testdata/ca.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpgIBAAKCAQEA6VIFTfJalQQtuHOLPOdHSGcAvt1sQfN8S0Rzo3c+hK8w1yqt +RWq3iSMFFbOqRnm4lWTME8REoQGg4j2lZyuq0xMGQzMcljYSnsYdNpvXR70oLYgV +BPoUo/+MJiLFohXHdrMR9qNEmigPyvv0UahqBZR8d0fIIVYlv+Iu3zPz5L3WR6VJ 
+ExLIH0zXKlaibMHPVQVdmnWiI07mjv92BdbgyAtR8Lahsn2PnGqlzoaSFYxbhkXA +Sv9UuO7PEb0Hy0t9C6GdcoafVbz5bNlVYJYwP+wt9l+aMppaXBxfMvnWDwT4gQgE +mpXDnVowjqUORy8AzuAurVq4tkxVfIpZIrDtcwIDAQABAoIBAQCcVDd3pYWpyLX1 +m31UnkX1rgYi3Gs3uTOznra4dSIvds6LrG2SUFGPEibLBql1NQNHHdVa/StakaPB +UrqraOe5K0sL5Ygm4S4Ssf1K5JoW2Be+gipLPmBsDcJSnwO6eUs/LfZAQd6qR2Nl +hvGJcQUwne/TYAYox/bdHWh4Zu/odz4NrZKZLbnXkdLLDEhZbjA0HpwJZ7NpMcB7 +Z6NayOm5dAZncfqBjY+3GNL0VjvDjwwYbESM8GkAbojMgcpODGk0h9arRWCP2RqT +SVgmiFI2mVT7sW1XLdVXmyCL2jzak7sktpbLbVgngwOrBmLO/m4NBftzcZrgvxj3 +YakCPH/hAoGBAP1v85pIxqWr5dFdRlOW0MG35ifL+YXpavcs233jGDHYNZefrR5q +Mw8eA20zwj41OdryqGh58nLYm3zYM0vPFrRJrzWYQfcWDmQELAylr9z9vsMj8gRq +IZQD6wzFmLi1PN2QDmovF+2y/CLAq03XK6FQlNsVQxubfjh4hcX5+nXDAoGBAOut +/pQaIBbIhaI8y3KjpizU24jxIkV8R/q1yE5V01YCl2OC5hEd4iZP14YLDRXLSHKT +e/dyJ/OEyTIzUeDg0ZF3ao9ugbWuASgrnrrdPEooi7C9n9PeaLFTK5oVZoVP2A7E +BwhSFW3VdEzQkdJczVE2jOY6JdBKMndjoDQnhT6RAoGBAL4WMO1gdnYeZ0JQJoZd +kPgrOZpR2DaDa3I3F+3k3enM0+2EmzE70E4fYcyPTLqh62H4LS4ngRx4sK7D7j2G +9u2EcsDNEXUE+wgzROK7hxtGysTMeiKrg8Hj6nFq53Bqp1s7SESGS/lCDPD398Rr +hdL5gJyN5waW6uXqJ9Pk+eFHAoGBAKV/YGcV1XTKSPT9ZgxRmM6ghq0qT1umA1Gt +t0QzBp2+Yhqx/+cDKhynMnxhZEXqoyw6HvJLSny5wSMsYJHeratNxRmFizZOQ2e3 +AdbMppqY0EdDUWnRI4lqExM3de+let4bj6irI3smSm3qhIvJOTCPcu/04zrZ74hh +AE2/dtTRAoGBAO6bENEqLgxZOvX5NnbytTuuoEnbceUPiIvc6S/nWJPEoGXVN2EJ +a3OaIOQmknE6bjXIWrHTaXJhwejvPUz9DVa4GxU5aJhs4gpocVGf+owQFvk4nJO8 +JL+QVVdXp3XdrXIGyvXJfy0fXXgJg5czrnDHjSTE8/2POtyuZ6VyBtQc +-----END RSA PRIVATE KEY----- diff --git a/pkg/promotel/internal/prometheus/scrape/testdata/client.cer b/pkg/promotel/internal/prometheus/scrape/testdata/client.cer new file mode 100644 index 0000000000..aeeca617fd --- /dev/null +++ b/pkg/promotel/internal/prometheus/scrape/testdata/client.cer @@ -0,0 +1,28 @@ +-----BEGIN CERTIFICATE----- +MIIErjCCA5agAwIBAgIUZSocqIiJtB3sMwam1lxD89SYebUwDQYJKoZIhvcNAQEL +BQAwXzELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoGA1UE +CgwTRGVmYXVsdCBDb21wYW55IEx0ZDEbMBkGA1UEAwwSUHJvbWV0aGV1cyBUZXN0 +IENBMB4XDTIyMDMxNzA3MDAzNloXDTQ5MDMxNzA3MDAzNlowajELMAkGA1UEBhMC +WFgxEzARBgNVBAgMClNvbWUtU3RhdGUxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEc +MBoGA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDERMA8GA1UEAwwIdGVzdHVzZXIw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDigQV8d+V/fgSxqmP3Atvp +UGGc59d2fGRA61Cqb+Z/dUANAf2GRN4A0p5Nsq8vHcZqdTSbV5vFq6yEEgZ7Exji +sMiMEdYHIIFGDw/Xwssu0+8u2G8wRvHaxHMYiv2CFsoqNH6UQn0/W9vnzhHmPtPr +YBVfvl4211c10QgTJrGdtnDpuLdrexhygWYWKFKD4Cow5/uLf669K9aHeslTqHGA +qOka7kB7Fjx4+kJVPFRrrZA+z6by0vsNpf+iapmQx5WOqOqQ4nQvGpKfIv7d6TQZ +XsZOrLf/GGeUvY+i46gum1J3q1GQeNG+i/Dl18Fkt0ucurdOwNUCJz79g2Shm5/F +Lo+0b0uTgGXQ59T/jOWlZUS6GG3UxySC6y4Pm8cgyKJKjjWy4I+/ABU+mtNd3iQG +SQZHqGjew423/3eRY0Q2nR9m19FLq9wD4eHG7/tBaNdwjz0nJAPRFKONqgZii1sr +POfA/L0Sskv5qvyHkS9ACMCrmhCCbT7YsAk55Oj7xX0LzvNM/xMqvrr0QaGW61h2 +/txkfKzc7vBnNVG0wvo5xmLW0Iccdf+52nudlxkaaZZ8DzvXG/qcdmX/NNofpT90 +jGP10vp8kwKf43Mv4zgMK2SmOK4M/uWZigiOtTWdIqL2iuyaqPakzIko8pGQmphY +ZuKYPIrRAg+RYZ1HrSemsQIDAQABo1cwVTATBgNVHSUEDDAKBggrBgEFBQcDAjAd +BgNVHQ4EFgQUQkOl8D7Cn1gpwoxg1LNKBM+mGnQwHwYDVR0jBBgwFoAUzPUFmeWr +EmnYeIlKMcrwiwutZhswDQYJKoZIhvcNAQELBQADggEBALQ7dkH4sSnnAU+LRdxf +SfzyEiG5YHArP0hT3CGOaVDRqftimLyYxTQXU/jI3LG3ai+SuCQiDyrhHvlDCzZA +gA9FaEI9d06nK8gu27i5PCHNegB8acPBPZdk+FxLU/XGKmbQiYG8Hqssc7C9gG0r +hiJX4KrqVgdtbxXTaP9p3dIc9N1EXJh8CX9E+QuNS7/E3cx+asPu2PiL+zt4G5tM +1kLxibnAF6zoXBUN0ap60BjJ+v9mxQYimqY4XEuSUo4RxVh0z19UAxuWEhbuWAvq +7Zk2AHG0i65w3XNuBPbICp/C9zxzcCd/3AlB6fJCkHYeTTeUUn5jqLNV89XdwjCh +nOI= +-----END CERTIFICATE----- diff --git a/pkg/promotel/internal/prometheus/scrape/testdata/client.key 
b/pkg/promotel/internal/prometheus/scrape/testdata/client.key new file mode 100644 index 0000000000..e584b7eada --- /dev/null +++ b/pkg/promotel/internal/prometheus/scrape/testdata/client.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEA4oEFfHflf34Esapj9wLb6VBhnOfXdnxkQOtQqm/mf3VADQH9 +hkTeANKeTbKvLx3GanU0m1ebxaushBIGexMY4rDIjBHWByCBRg8P18LLLtPvLthv +MEbx2sRzGIr9ghbKKjR+lEJ9P1vb584R5j7T62AVX75eNtdXNdEIEyaxnbZw6bi3 +a3sYcoFmFihSg+AqMOf7i3+uvSvWh3rJU6hxgKjpGu5AexY8ePpCVTxUa62QPs+m +8tL7DaX/omqZkMeVjqjqkOJ0LxqSnyL+3ek0GV7GTqy3/xhnlL2PouOoLptSd6tR +kHjRvovw5dfBZLdLnLq3TsDVAic+/YNkoZufxS6PtG9Lk4Bl0OfU/4zlpWVEuhht +1MckgusuD5vHIMiiSo41suCPvwAVPprTXd4kBkkGR6ho3sONt/93kWNENp0fZtfR +S6vcA+Hhxu/7QWjXcI89JyQD0RSjjaoGYotbKzznwPy9ErJL+ar8h5EvQAjAq5oQ +gm0+2LAJOeTo+8V9C87zTP8TKr669EGhlutYdv7cZHys3O7wZzVRtML6OcZi1tCH +HHX/udp7nZcZGmmWfA871xv6nHZl/zTaH6U/dIxj9dL6fJMCn+NzL+M4DCtkpjiu +DP7lmYoIjrU1nSKi9orsmqj2pMyJKPKRkJqYWGbimDyK0QIPkWGdR60nprECAwEA +AQKCAgEA18az1ERf9Fm33Q0GmE039IdnxlMy9qQ/2XyS5xsdCXVIZFvuClhW6Y+7 +0ScVLpx95fLr/8SxF9mYymRlmh+ySFrDYnSnYTi9DmHQ5OmkKGMr64OyQNqFErSt +NMdMA/7z7sr9fv3sVUyMLMMqWB6oQgXRttki5bm1UgZlW+EzuZwQ6wbWbWTiAEt3 +VkppeUo2x0poXxdu/rXhdEUrwC+qmTfQgaBQ+zFOwK0gPhTwE3hP/xZQ4+jL08+8 +vRwyWTNZLYOLmiSxLCJzZXiwNfUwda7M2iw+SJ0WKCOBz1pzYJsFMA2b8Ta4EX89 +Kailiu328UMK19Jp2dhLcLUYS8B2rVVAK5b/O6iKV8UpKTriXDiCKSpcugpsQ1ML +zq/6vR0SQXD+/W0MesGaNa33votBXJSsf9kZnYJw43n+W4Z/XFUE5pyNM/+TGAqw +yuF4FX2sJL1uP5VMOh2HdthTr+/ewx/Trn9/re0p54z83plVlp4qbcORLiQ2uDf6 +ZZ0/gHzNTp4Fzz81ZvHLm9smpe8cLvojrKLvCl0hv5zAf3QtsajpTN9uM7AsshV1 +QVZSuAxb5n9bcij5F2za1/dd7WLlvsSzgNJ4Td/gEDI8qepB0+7PGlJ17sMg0nWP +nFxUfGIsCF1KOoPwLyaNHHrRGjJigFUufqkbmSWkOzgC6pZVUXECggEBAP81To16 +O5BlGDahcQkjKkqUwUtkhjE9/KQBh3zHqxsitI8f0U7eL3Ge1qhbgEgvHwHOjWSV +pcG9atE55b7qlqqGQboiO1jfyLfIVLfamj0fHLinO/pV/wcBNy6Hz4rP7DNJDCMz +0agz/Ys3VXrZIk5sO0sUBYMBxho1x0n65Z06iK1SwD/x4Xg3/Psyx+ujEEkSsv5I +Gg7aOTHLRSIPUx/OK+4M3sp58PeMGfEYNYxNiEoMiUQgu/srKRjs+pUKXCkEraNW +8s/ODYJ7iso6Z1z4NxfBH+hh+UrxTffh7t0Sz5gdUwUnBNb2I4EdeCcCTOnWYkut +/GKW8oHD7f9VDS0CggEBAOM06rrp9rSsl6UhTu8LS5bjBeyUxab4HLZKP5YBitQO +ltcPS05MxQ3UQ1BAMDRjXE2nrKlWMOAybrffEXBi4U1jYt7CiuCwwsPyaYNWT5qO +Iwdjebkeq3+Mh8c48swhOwRLWSGg6mtRoR/c5cthYU62+s2zdxc/yhVTQ0WNFabT +23PYtjjW41WuR6K7Dhrdcw0MwIs1arZHTsDdU6Hln9raTSNwlHMBWVz/tzuwLieQ +WEUXvsQvPtgPyohmDd0ueXiuS2FiYaXKFIMFj5/JyyJc1OCr1vIQN8mMcUjNbk2I +VaeeSPawgKIiYARhbjJtjwjY6D59gOZrNGYASQOTGhUCggEAJPOB8SgekbShgd90 +L1+BExVgu1rNtzmDZ/e0t1Ntqdsni4WO172B3xChgfTlqQ3xjmBqxoKIYnnbinm4 +kyECOaSAxcOJFkAonruJ0Kj9JhZoITBNldx3tXruk3UkjrO2PmK4OCybkaAdeNfF +L6lat0Iif6dheOt71HWu6j5CmrZL7dSKc3fBLpfksDZVDgApLntfoUOtSjM8jsIg +u2K+pV9Dqw7//w8S3bTSWL8pmavsLNSN12hp7177b1l4mrXKTEIaJglD1OS/vgHH +QaqdJq/lwjG7PflZkAlKQbbbz/SWTC8Kwzc4EyvGTj6HFBbYLg9VYiHJ5jh22mUV +A6A77QKCAQAM6DWpdp8QNnnK5LCCPecGZFEy1mTADno7FM6169KCJ24EO5cwlIXh +Ojy0s2DJqRdWRf82A3J1WggWI/Luqn9YERxNwUl4aDI4RW4fCuksw4RT6B/DF23w +qgAQnjiUxhJ/NPSUR3rpq9J2Z+sZ+ac4fIaU5uwOAw6s1XUN32zqdECUPSxk4Dg7 +5tGk+fFcL1ZY2G+buOYeAsEDjc8xdET3fs1BBSU5v0rfUJuNJX4Ju1Z4Xlf09yYf +yg3cX8fL19cItwYLOzaG34r4wnkdP65tfk6NkNV+HNO+fF73Hsx0VRlgk0pb0T0N +eNxxg0NqU/T7MK9I1YJcFJz+ame7b0DdAoIBAFw3Sf9LbVVNh8ef4OqjBZR8RCYq +4HeG0FPYvMLzUtFi7j4uBfiL4+pNpSFvecSuLRKE8Pr5dPRJNPNgJud5gvuykBZX +Q9ktQJTAPZK8Q5neLeXfAdoF3szJuEZbDdGSps4JFokVIX+h3c+uFRD9QMSh+bz1 +nEXCdYvmTs+bsTL+l7cbXq2iIKk1QnEcL+cRYr3VjP5xxZ/hGnuYqe9wmyo2MVkS +NVUmCifIvE34TO072HH49gVPrhj9qIZsfBh4LBpl75eKwXTXx+HFqHhP8OfzuK6U +v/JQn9JUGGzkmoMazQ9o5D5h/o0t/OGOPnQeqWL4BIPXdHv/dua6jLnAoU8= +-----END RSA PRIVATE KEY----- diff --git 
a/pkg/promotel/internal/prometheus/scrape/testdata/server.cer b/pkg/promotel/internal/prometheus/scrape/testdata/server.cer new file mode 100644 index 0000000000..c7608c3812 --- /dev/null +++ b/pkg/promotel/internal/prometheus/scrape/testdata/server.cer @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDljCCAn6gAwIBAgIUIcqjmIGMLSIlEGcSinhaw9+fYWowDQYJKoZIhvcNAQEL +BQAwXzELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoGA1UE +CgwTRGVmYXVsdCBDb21wYW55IEx0ZDEbMBkGA1UEAwwSUHJvbWV0aGV1cyBUZXN0 +IENBMB4XDTIyMDMxNzA2MzQ1MloXDTQ5MDMxNzA2MzQ1MlowVjELMAkGA1UEBhMC +WFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoGA1UECgwTRGVmYXVsdCBDb21w +YW55IEx0ZDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAxCEfTBqVmUD4bLlkHdjPPFm1ruaFNfc/7IF5DobrKt6LgfuC +/4xMImPmpQFz4FJy1ryWgopRBaOimmPKHi3PxrCRMPmwTJ50Tv8bcNLuYuCbHQ+y +XCquRcUFCGb8LZ9pl9I7r1Nny67V2AKaYZ70rZeo+dHp+716C0ukrxFPSz8LHThs +Wdo9LR5zKP7ersDHMOwfzhnUo+jgseCo1aUeoWxA+of7tb1qOEXqWc7gCsQMncq4 +sOWCsQ3MKwNpuWzRTZexxwEnM/4uz6JDAx8qnHYhJAMeqeKilIgLD6w+5+IC/44a +ecbqsYZZCNSy2p/DgOdTgNm+StwRagZrp+rbqQIDAQABo1MwUTAPBgNVHREECDAG +hwR/AAABMB0GA1UdDgQWBBQCsrwnq5c2CYe44tc2i1RyvWX0FDAfBgNVHSMEGDAW +gBTM9QWZ5asSadh4iUoxyvCLC61mGzANBgkqhkiG9w0BAQsFAAOCAQEA4/7pIiWV +5Xc6XBPZIlc8+cmcDP56EIqq5VIBkB6NMdXP8nBbnga0c8o+owhk3za9A8IS7KQ/ +9+rMRPahPHrQFK44/6NiHqARTT9Im32vsH5Dgnl1+Ec2Ni3j+WRB0z3bV/T46Rsj +yVQhJI8FLiKR7hf9VFao46RBKFhi7kT7y2MeelZCfLB6lJiMmINKn9c4ElFXrIHC +RVgdWEZnpalRhADRAIItWj6ynCtD9z0Ohe1JGU5C0RlqMruj6ghtcf5SBujnRhRW +OUbP59kYS/SC1Kos0sfZAqtyRKIrbc6AEbUM9dVIywm3GIo+oMMfxlxeWOnDgrCE +HM7RCqDVnA7SNw== +-----END CERTIFICATE----- diff --git a/pkg/promotel/internal/prometheus/scrape/testdata/server.key b/pkg/promotel/internal/prometheus/scrape/testdata/server.key new file mode 100644 index 0000000000..2266b01500 --- /dev/null +++ b/pkg/promotel/internal/prometheus/scrape/testdata/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAxCEfTBqVmUD4bLlkHdjPPFm1ruaFNfc/7IF5DobrKt6LgfuC +/4xMImPmpQFz4FJy1ryWgopRBaOimmPKHi3PxrCRMPmwTJ50Tv8bcNLuYuCbHQ+y +XCquRcUFCGb8LZ9pl9I7r1Nny67V2AKaYZ70rZeo+dHp+716C0ukrxFPSz8LHThs +Wdo9LR5zKP7ersDHMOwfzhnUo+jgseCo1aUeoWxA+of7tb1qOEXqWc7gCsQMncq4 +sOWCsQ3MKwNpuWzRTZexxwEnM/4uz6JDAx8qnHYhJAMeqeKilIgLD6w+5+IC/44a +ecbqsYZZCNSy2p/DgOdTgNm+StwRagZrp+rbqQIDAQABAoIBACeOjqNo0TdhtTko +gxrJ+bIwXcZy0/c4cPogeuwFJjU1QWnr8lXcVBazk3dAPcDGoEbTLoARqZm7kTYW +XlOL5dYrEn2QPpCVfNvZ9AzjXhUvO9m2qsCQEyobPJKfQslo14E5c7Q+3DZmgtbY +X47E4pCIgBoyzkBpzM2uaf6tPRLtv8QcLklcf7lP5rd0Zypc325RR6+J5nxfCoFp +fD3sj7t/lJLS8Xb6m4/YFjsVJ2qEAelZ086v8unMBEj324Vv/VqrkPFtFNJKI+Az +Pd9xFDBdsKijBn1Yam9/dj7CiyZYKaVZ9p/w7Oqkpbrt8J8S8OtNHZ4fz9FJgRu9 +uu+VTikCgYEA5ZkDmozDseA/c9JTUGAiPfAt5OrnqlKQNzp2m19GKh+Mlwg4k6O5 +uE+0vaQEfc0cX3o8qntWNsb63XC9h6oHewrdyVFMZNS4nzzmKEvGWt9ON6qfQDUs +1cgZ0Y/uKydDX/3hk/hnJbeRW429rk0/GTuSHHilBzhE0uXJ11xPG48CgYEA2q7a +yqTdqPmZFIAYT9ny099PhnGYE6cJljTUMX9Xhk4POqcigcq9kvNNsly2O1t0Eq0H +2tYo91xTCZc3Cb0N+Vx3meLIljnzhEtwzU9w6W5VGJHWiqovjGwtCdm/W28OlMzY +zM+0gVCJzZLhL0vOwBLwGUJvjgfpvgIb/W+C2UcCgYB5TJ3ayQOath7P0g6yKBfv +ITUd+/zovzXx97Ex5OPs3T4pjO5XEejMt0+F4WF+FR8oUiw65W5nAjkHRMjdI7dQ +Ci2ibpEttDTV7Bass1vYJqHsRvhbs7w8NbtuO9xYcCXoUPkcc+AKzTC+beQIckcj +zZUj9Zk6dz/lLAG3Bc3FgQKBgQC+MmZI6auAU9Y4ZlC+4qi4bfkUzaefMCC+a6RC +iKbvQOUt9j+k81h+fu6MuuYkKh6CP8wdITbwLXRrWwGbjrqgrzO2u/AJ+M07uwGZ +EAb8f+GzROR8JhjE4TEq6B/uvmDIOoI1YFF2Rz4TdjQ0lpJzrAT3czjjJy68+8is +XFhJ8QKBgQCMPpB7taMLQzuilEGabL6Xas9UxryiGoBHk4Umb107GVWgwXxWT6fk +YSlvbMQHCgVeaJe374Bghyw33Z3WilWM1fCWya/CxXlw9wakjQHiqFCIOCxdgosX 
+Sr35bRFWJMnHXD+jD0Vr8WrtbGzFSZb3ZrjT6WhWRIGCHcaMANN9ew== +-----END RSA PRIVATE KEY----- diff --git a/pkg/promotel/internal/prometheus/scrape/testdata/servername.cer b/pkg/promotel/internal/prometheus/scrape/testdata/servername.cer new file mode 100644 index 0000000000..d1aea98b12 --- /dev/null +++ b/pkg/promotel/internal/prometheus/scrape/testdata/servername.cer @@ -0,0 +1,72 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 1 (0x1) + Signature Algorithm: sha256WithRSAEncryption + Issuer: C = XX, L = Default City, O = Default Company Ltd, CN = Prometheus Test CA + Validity + Not Before: Aug 12 19:58:32 2020 GMT + Not After : Nov 27 19:58:32 2041 GMT + Subject: C = XX, ST = State, L = City, O = Prom Test Inc., CN = Test + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + RSA Public-Key: (2048 bit) + Modulus: + 00:ca:58:23:ba:98:7f:ce:08:b5:03:62:92:d0:8c: + 3f:ef:7c:dd:a2:6e:38:f4:6c:3e:0c:04:c0:a4:bd: + 6c:29:85:13:01:50:64:d2:e0:e9:11:92:30:7e:e6: + 04:03:3e:91:6f:30:60:cd:d6:30:d7:47:26:0c:20: + c3:6f:49:4d:4c:11:78:57:4f:1b:18:05:dc:61:e2: + 64:2a:72:07:45:9a:6b:4c:fb:3b:3d:60:1a:8b:58: + c4:d0:7e:a9:87:e4:e1:b8:78:21:3e:4b:bb:38:07: + b2:4e:f6:32:b7:98:b4:8e:45:36:be:02:d2:3d:af: + 70:2d:c8:82:7e:fc:2d:77:a4:b5:82:cb:61:7c:49: + 7c:62:c2:87:88:45:44:fb:98:78:9c:b8:f3:a7:c3: + 08:80:60:4d:3e:1c:d0:bf:05:bc:c7:a6:e9:d3:08: + 6e:57:33:85:44:74:75:54:17:8a:e6:bc:46:d0:b6: + c2:55:4d:d0:e6:83:d5:48:bb:91:4e:48:d0:ea:f3: + 6d:9b:fa:99:3d:3c:36:98:02:e3:a5:15:2b:37:16: + c0:e3:9d:da:13:7d:df:a0:91:db:de:01:7c:e5:b4: + ad:eb:b1:7b:82:51:70:c3:9b:2a:1a:dc:53:8a:3b: + bc:b7:c8:0d:cb:b5:15:d1:13:a5:92:80:bb:a3:5a: + 4c:b3 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Alternative Name: + DNS:prometheus.rocks + Signature Algorithm: sha256WithRSAEncryption + 00:12:d7:ac:b7:ef:64:83:22:bb:34:3b:48:1e:c1:2c:e7:57: + fd:cd:0e:9a:fa:91:7c:f2:a7:72:45:ac:9c:8d:1b:ef:eb:5b: + 62:5a:c3:a3:f2:53:75:23:25:6b:6c:ec:6e:c0:de:92:f6:11: + 17:93:6d:b6:0d:e4:13:a3:eb:c5:ee:33:2e:f9:97:73:65:d2: + b2:23:c0:87:63:67:02:78:a4:28:4b:df:f2:c5:47:dc:49:27: + 06:53:12:d7:6d:53:95:eb:01:e3:58:6e:0c:44:43:30:bc:b0: + d7:0b:14:c7:81:8e:0d:a3:44:ee:2c:89:32:ab:8d:21:54:ed: + 8d:9a:fb:2f:f5:53:1d:ec:43:d4:ef:76:c7:5b:dd:3c:54:df: + 29:f7:64:34:e9:7e:98:49:1b:26:3c:52:77:43:70:f8:2c:8c: + 2f:af:24:cc:81:58:be:8d:b9:6e:2e:e8:75:9a:27:6e:24:05: + eb:dc:df:c6:23:93:5b:f1:03:c6:0a:4d:ad:d7:0e:cd:bc:e9: + 6e:38:c7:7f:ba:af:1b:91:1c:37:77:b6:0c:4d:81:da:04:3a: + eb:4a:c4:8a:41:29:9f:45:a0:0f:1b:d1:32:28:48:77:5d:f4: + 3a:71:7d:87:b0:98:c4:c3:94:e1:cf:65:cf:12:e2:90:02:48: + a8:b7:a1:5d +-----BEGIN CERTIFICATE----- +MIIDTTCCAjWgAwIBAgIBATANBgkqhkiG9w0BAQsFADBfMQswCQYDVQQGEwJYWDEV +MBMGA1UEBwwMRGVmYXVsdCBDaXR5MRwwGgYDVQQKDBNEZWZhdWx0IENvbXBhbnkg +THRkMRswGQYDVQQDDBJQcm9tZXRoZXVzIFRlc3QgQ0EwHhcNMjAwODEyMTk1ODMy +WhcNNDExMTI3MTk1ODMyWjBUMQswCQYDVQQGEwJYWDEOMAwGA1UECAwFU3RhdGUx +DTALBgNVBAcMBENpdHkxFzAVBgNVBAoMDlByb20gVGVzdCBJbmMuMQ0wCwYDVQQD +DARUZXN0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAylgjuph/zgi1 +A2KS0Iw/73zdom449Gw+DATApL1sKYUTAVBk0uDpEZIwfuYEAz6RbzBgzdYw10cm +DCDDb0lNTBF4V08bGAXcYeJkKnIHRZprTPs7PWAai1jE0H6ph+ThuHghPku7OAey +TvYyt5i0jkU2vgLSPa9wLciCfvwtd6S1gsthfEl8YsKHiEVE+5h4nLjzp8MIgGBN +PhzQvwW8x6bp0whuVzOFRHR1VBeK5rxG0LbCVU3Q5oPVSLuRTkjQ6vNtm/qZPTw2 +mALjpRUrNxbA453aE33foJHb3gF85bSt67F7glFww5sqGtxTiju8t8gNy7UV0ROl +koC7o1pMswIDAQABox8wHTAbBgNVHREEFDASghBwcm9tZXRoZXVzLnJvY2tzMA0G +CSqGSIb3DQEBCwUAA4IBAQAAEtest+9kgyK7NDtIHsEs51f9zQ6a+pF88qdyRayc 
+jRvv61tiWsOj8lN1IyVrbOxuwN6S9hEXk222DeQTo+vF7jMu+ZdzZdKyI8CHY2cC +eKQoS9/yxUfcSScGUxLXbVOV6wHjWG4MREMwvLDXCxTHgY4No0TuLIkyq40hVO2N +mvsv9VMd7EPU73bHW908VN8p92Q06X6YSRsmPFJ3Q3D4LIwvryTMgVi+jbluLuh1 +miduJAXr3N/GI5Nb8QPGCk2t1w7NvOluOMd/uq8bkRw3d7YMTYHaBDrrSsSKQSmf +RaAPG9EyKEh3XfQ6cX2HsJjEw5Thz2XPEuKQAkiot6Fd +-----END CERTIFICATE----- diff --git a/pkg/promotel/internal/prometheus/scrape/testdata/servername.key b/pkg/promotel/internal/prometheus/scrape/testdata/servername.key new file mode 100644 index 0000000000..95d6aca525 --- /dev/null +++ b/pkg/promotel/internal/prometheus/scrape/testdata/servername.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAylgjuph/zgi1A2KS0Iw/73zdom449Gw+DATApL1sKYUTAVBk +0uDpEZIwfuYEAz6RbzBgzdYw10cmDCDDb0lNTBF4V08bGAXcYeJkKnIHRZprTPs7 +PWAai1jE0H6ph+ThuHghPku7OAeyTvYyt5i0jkU2vgLSPa9wLciCfvwtd6S1gsth +fEl8YsKHiEVE+5h4nLjzp8MIgGBNPhzQvwW8x6bp0whuVzOFRHR1VBeK5rxG0LbC +VU3Q5oPVSLuRTkjQ6vNtm/qZPTw2mALjpRUrNxbA453aE33foJHb3gF85bSt67F7 +glFww5sqGtxTiju8t8gNy7UV0ROlkoC7o1pMswIDAQABAoIBADZ5vETEQcRKe9FJ +fJVA7QWg7FqKqjLD4YCC1wqDJNeYyCEWb86GVrkwTnYbnwDwm17/+0/vVn7e3NNv +Dq6rYXAVU/zNg1HYYhjIRodW47ZNeI3lJXHEqeDSKUqojyPS7yIm1WxcHy9agxrX +FZhwOEwFPlOxlsCcturcjKV7ZxJKftiWoyPodQLjlEmNoD/MQ6Obuge1dQZRLwCk +/R+EcTWHN4A+rpnZLoKFEaw5p7DTjdKSGOu+EFB+lrEg5kTOCN/kR0PYGnDH1Ygd +6/DmP0xiPpT2pKudTtI7f+QoPtff+GJ47Xy1oYks/cXUJiJbtCT9wyKQtR5mZRUc +ruNWBCECgYEA9e87HbUaMA4tAqaur684RTFAqpDjDBB8tDAxbnuQrv6947odgQHu +YcBAneL2HIvUMuusI0X52nGRwt+qOSXiS1WQwA1P44qR28VYxLIkgK1xMEpezClU +xIavMzwZtmjCZ84Q6H/qvVuqa5MuE4pe6O9vnb4cUWF280ngmf+zViUCgYEA0qAx +qzh6cUBSF6PAV+7QKXB4YLfvLloX3qwC+qkdaGjacREb7URxTKs1lHLhpmHwoPN+ +aXccxNs443Z67AK68N2RAOVw3z1IPTmSUzL7HCKqzZtRXsj+Lm8bj9sRzvWuE7RU +X2QW+9ppAvjwwrhG0vXCs3yua2usMyHjr6ekw/cCgYBSut0qCyf6Dmq5v5R36PuG +2yCjwAWAo3Mvsh6OyeZL18nM92jBYwLrwx55fkXIKImDb6ACZaG9CAM+iLrcapAL +Q4dj85ZyNsUGJwbLdBmvZ6jx07K7/xNS4PPCym7j2625+anabF1swY88jNAtJpjy +xsjHSZKBFcZL5Qg3BbswOQKBgHigD/IMRWtot9scCAMUHRkudXKGxK9aH4OCJa6i +fdoW+st4TfMjmHOdNfFPndWpD6NN8B68fbhsCHeUmi9iHOfnLK1DudHQCfguaZPG +hbOGUyWvhvluyMuVDEbl4pwRbeGRDCUZcGRKoIt4QIJ0APO+lgQvKsEQiC08gmZN +73nfAoGAKXVVV7dN59gohMTRWsOSGP+YLEj8+rGZZYNKCLVTol0VQ7T30tA0P4Cf +Dw9oLKGnDdgTtJA6Fsms858B6ANC+6Hxd9LG0ecOevKMBFHuWPm56Z0ofDzoPVBW +eDuHeR5xF0xq5PIFl/mIJJ1NK0p1Do9gwqEEIftdNyrcGefGdXk= +-----END RSA PRIVATE KEY----- diff --git a/pkg/promotel/internal/prometheus/textparse/README.md b/pkg/promotel/internal/prometheus/textparse/README.md new file mode 100644 index 0000000000..697966f097 --- /dev/null +++ b/pkg/promotel/internal/prometheus/textparse/README.md @@ -0,0 +1,6 @@ +# Making changes to textparse lexers +In the rare case that you need to update the textparse lexers, edit promlex.l or openmetricslex.l and then run the following command: +`golex -o=promlex.l.go promlex.l` + +Note that you need golex installed: +`go get -u modernc.org/golex` \ No newline at end of file diff --git a/pkg/promotel/internal/prometheus/textparse/promotel.go b/pkg/promotel/internal/prometheus/textparse/promotel.go new file mode 100644 index 0000000000..12fddf2877 --- /dev/null +++ b/pkg/promotel/internal/prometheus/textparse/promotel.go @@ -0,0 +1,61 @@ +package textparse + +import ( + "bytes" + "fmt" + "io" + + "github.com/gogo/protobuf/proto" + io_prometheus_client "github.com/prometheus/client_model/go" + "github.com/prometheus/prometheus/model/labels" + promtextparse "github.com/prometheus/prometheus/model/textparse" + dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" +) + +func convertMetricFamilyPb(srcMf 
*io_prometheus_client.MetricFamily, dst *dto.MetricFamily) (n int, err error) {
+	// Round-trip through the wire format: marshal the client_model message and
+	// unmarshal the bytes into the prompb message; n is the encoded size.
+	protoBuf, err := proto.Marshal(srcMf)
+	if err != nil {
+		return 0, err
+	}
+	dst.Reset()
+	err = dst.Unmarshal(protoBuf)
+	if err != nil {
+		return 0, err
+	}
+	return len(protoBuf), nil
+}
+
+// ProtobufParserShim feeds already-gathered MetricFamily messages to the
+// embedded ProtobufParser instead of a length-delimited protobuf byte stream.
+type ProtobufParserShim struct {
+	*ProtobufParser
+	mfs   []*io_prometheus_client.MetricFamily
+	index int
+}
+
+// readDelimited overrides the embedded ProtobufParser's readDelimited method.
+// Instead of decoding a length-delimited message from b, it serves the next
+// pre-gathered MetricFamily. It always reports n=0; that is safe because the
+// parser's input buffer is empty and progress is tracked by p.index instead.
+func (p *ProtobufParserShim) readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) {
+	if p == nil || p.index >= len(p.mfs) {
+		return 0, io.EOF
+	}
+	// Copies the proto message from io_prometheus_client.MetricFamily to dto.MetricFamily.
+	_, err = convertMetricFamilyPb(p.mfs[p.index], mf)
+	if err != nil {
+		// TODO: exercise this conversion-failure path in a test.
+		return 0, fmt.Errorf("failed to convert io_prometheus_client.MetricFamily to dto.MetricFamily: %w", err)
+	}
+	p.index++
+	return 0, nil
+}
+
+func NewProtobufParserShim(parseClassicHistograms bool, st *labels.SymbolTable, mfs []*io_prometheus_client.MetricFamily) promtextparse.Parser {
+	p := &ProtobufParserShim{&ProtobufParser{
+		in:                     []byte{},
+		state:                  promtextparse.EntryInvalid,
+		mf:                     &dto.MetricFamily{},
+		metricBytes:            &bytes.Buffer{},
+		parseClassicHistograms: parseClassicHistograms,
+		builder:                labels.NewScratchBuilderWithSymbolTable(st, 16),
+	}, mfs, 0}
+	// Route the parser's delimited reads through the shim.
+	p.ProtobufParser.readDelimitedFunc = p.readDelimited
+	return p
+}
diff --git a/pkg/promotel/internal/prometheus/textparse/promotel_test.go b/pkg/promotel/internal/prometheus/textparse/promotel_test.go
new file mode 100644
index 0000000000..7389d957b6
--- /dev/null
+++ b/pkg/promotel/internal/prometheus/textparse/promotel_test.go
@@ -0,0 +1,1629 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package textparse
+
+import (
+	"errors"
+	"io"
+	"testing"
+
+	"github.com/gogo/protobuf/proto"
+	"github.com/prometheus/client_golang/prometheus"
+	io_prometheus_client "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/textparse"
+	promtextparse "github.com/prometheus/prometheus/model/textparse"
+	"github.com/prometheus/prometheus/util/testutil"
+	"github.com/stretchr/testify/require"
+)
+
+func createTestProtoBufGatherer(t *testing.T) *testGatherer {
+	testMetricFamilies, _ := createTestProtoBuf(t)
+
+	metrics := make([]*io_prometheus_client.MetricFamily, 0, len(testMetricFamilies))
+	for _, tmf := range testMetricFamilies {
+		pb := &io_prometheus_client.MetricFamily{}
+		// From text to proto message.
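+		// (gogo/protobuf's text codec; the reverse of the binary round trip
+		// in promotel.go.) The gathered slice is what NewProtobufParserShim
+		// consumes; hypothetical usage:
+		//
+		//	mfs, _ := g.Gather()
+		//	p := NewProtobufParserShim(false, labels.NewSymbolTable(), mfs)
+		//	for _, err := p.Next(); err != io.EOF; _, err = p.Next() {
+		//	}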
+ require.NoError(t, proto.UnmarshalText(tmf, pb)) + metrics = append(metrics, pb) + } + + return &testGatherer{metrics} +} + +type testGatherer struct { + metrics []*io_prometheus_client.MetricFamily +} + +var _ prometheus.Gatherer = &testGatherer{} + +func (m *testGatherer) Gather() ([]*io_prometheus_client.MetricFamily, error) { + return m.metrics, nil +} + +func TestProtobufParseShim(t *testing.T) { + type parseResult struct { + lset labels.Labels + m string + t int64 + v float64 + typ model.MetricType + help string + unit string + comment string + shs *histogram.Histogram + fhs *histogram.FloatHistogram + e []exemplar.Exemplar + ct int64 + } + + testPromGatherer := createTestProtoBufGatherer(t) + + metrics, err := testPromGatherer.Gather() + require.NoError(t, err) + scenarios := []struct { + name string + parser promtextparse.Parser + expected []parseResult + }{ + { + name: "ignore classic buckets of native histograms", + parser: NewProtobufParserShim(false, labels.NewSymbolTable(), metrics), + expected: []parseResult{ + { + m: "go_build_info", + help: "Build information about the main Go module.", + }, + { + m: "go_build_info", + typ: model.MetricTypeGauge, + }, + { + m: "go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)", + v: 1, + lset: labels.FromStrings( + "__name__", "go_build_info", + "checksum", "", + "path", "github.com/prometheus/client_golang", + "version", "(devel)", + ), + }, + { + m: "go_memstats_alloc_bytes_total", + help: "Total number of bytes allocated, even if freed.", + unit: "bytes", + }, + { + m: "go_memstats_alloc_bytes_total", + typ: model.MetricTypeCounter, + }, + { + m: "go_memstats_alloc_bytes_total", + v: 1.546544e+06, + lset: labels.FromStrings( + "__name__", "go_memstats_alloc_bytes_total", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "42"), Value: 12, HasTs: true, Ts: 1625851151233}, + }, + }, + { + m: "something_untyped", + help: "Just to test the untyped type.", + }, + { + m: "something_untyped", + typ: model.MetricTypeUnknown, + }, + { + m: "something_untyped", + t: 1234567, + v: 42, + lset: labels.FromStrings( + "__name__", "something_untyped", + ), + }, + { + m: "test_histogram", + help: "Test histogram with many buckets removed to keep it manageable in size.", + }, + { + m: "test_histogram", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram", + t: 1234568, + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { + m: "test_gauge_histogram", + help: "Like test_histogram but as gauge histogram.", + }, + { + m: "test_gauge_histogram", + typ: model.MetricTypeGaugeHistogram, + }, + { + m: "test_gauge_histogram", + t: 1234568, + shs: &histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + 
NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { + m: "test_float_histogram", + help: "Test float histogram with many buckets removed to keep it manageable in size.", + }, + { + m: "test_float_histogram", + typ: model.MetricTypeHistogram, + }, + { + m: "test_float_histogram", + t: 1234568, + fhs: &histogram.FloatHistogram{ + Count: 175.0, + ZeroCount: 2.0, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0}, + NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0}, + }, + lset: labels.FromStrings( + "__name__", "test_float_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { + m: "test_gauge_float_histogram", + help: "Like test_float_histogram but as gauge histogram.", + }, + { + m: "test_gauge_float_histogram", + typ: model.MetricTypeGaugeHistogram, + }, + { + m: "test_gauge_float_histogram", + t: 1234568, + fhs: &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Count: 175.0, + ZeroCount: 2.0, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0}, + NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0}, + }, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { + m: "test_histogram2", + help: "Similar histogram as before but now without sparse buckets.", + }, + { + m: "test_histogram2", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram2_count", + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram2_count", + ), + }, + { + m: "test_histogram2_sum", + v: 0.000828, + lset: labels.FromStrings( + "__name__", "test_histogram2_sum", + ), + }, + { + m: "test_histogram2_bucket\xffle\xff-0.00048", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "-0.00048", + ), + }, + { + m: "test_histogram2_bucket\xffle\xff-0.00038", + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "-0.00038", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146}, + }, + }, + { + m: "test_histogram2_bucket\xffle\xff1.0", + v: 16, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "1.0", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.000295, HasTs: false}, + }, + }, + { + m: "test_histogram2_bucket\xffle\xff+Inf", + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "+Inf", + ), + }, + { + m: 
"test_histogram_family", + help: "Test histogram metric family with two very simple histograms.", + }, + { + m: "test_histogram_family", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram_family\xfffoo\xffbar", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 5, + Sum: 12.1, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 8, Length: 2}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{2, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "foo", "bar", + ), + }, + { + m: "test_histogram_family\xfffoo\xffbaz", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 6, + Sum: 13.1, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 8, Length: 2}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{1, 4}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "foo", "baz", + ), + }, + { + m: "test_float_histogram_with_zerothreshold_zero", + help: "Test float histogram with a zero threshold of zero.", + }, + { + m: "test_float_histogram_with_zerothreshold_zero", + typ: model.MetricTypeHistogram, + }, + { + m: "test_float_histogram_with_zerothreshold_zero", + fhs: &histogram.FloatHistogram{ + Count: 5.0, + Sum: 12.1, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 8, Length: 2}, + }, + PositiveBuckets: []float64{2.0, 3.0}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "test_float_histogram_with_zerothreshold_zero", + ), + }, + { + m: "rpc_durations_seconds", + help: "RPC latency distributions.", + }, + { + m: "rpc_durations_seconds", + typ: model.MetricTypeSummary, + }, + { + m: "rpc_durations_seconds_count\xffservice\xffexponential", + v: 262, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds_count", + "service", "exponential", + ), + }, + { + m: "rpc_durations_seconds_sum\xffservice\xffexponential", + v: 0.00025551262820703587, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds_sum", + "service", "exponential", + ), + }, + { + m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.5", + v: 6.442786329648548e-07, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.5", + "service", "exponential", + ), + }, + { + m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.9", + v: 1.9435742936658396e-06, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.9", + "service", "exponential", + ), + }, + { + m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.99", + v: 4.0471608667037015e-06, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.99", + "service", "exponential", + ), + }, + { + m: "without_quantiles", + help: "A summary without quantiles.", + }, + { + m: "without_quantiles", + typ: model.MetricTypeSummary, + }, + { + m: "without_quantiles_count", + v: 42, + lset: labels.FromStrings( + "__name__", "without_quantiles_count", + ), + }, + { + m: "without_quantiles_sum", + v: 1.234, + lset: labels.FromStrings( + "__name__", "without_quantiles_sum", + ), + }, + { + m: "empty_histogram", + help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram.", + }, + { + m: "empty_histogram", + typ: model.MetricTypeHistogram, + }, + { + m: "empty_histogram", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + 
PositiveSpans: []histogram.Span{}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "empty_histogram", + ), + }, + { + m: "test_counter_with_createdtimestamp", + help: "A counter with a created timestamp.", + }, + { + m: "test_counter_with_createdtimestamp", + typ: model.MetricTypeCounter, + }, + { + m: "test_counter_with_createdtimestamp", + v: 42, + ct: 1000, + lset: labels.FromStrings( + "__name__", "test_counter_with_createdtimestamp", + ), + }, + { + m: "test_summary_with_createdtimestamp", + help: "A summary with a created timestamp.", + }, + { + m: "test_summary_with_createdtimestamp", + typ: model.MetricTypeSummary, + }, + { + m: "test_summary_with_createdtimestamp_count", + v: 42, + ct: 1000, + lset: labels.FromStrings( + "__name__", "test_summary_with_createdtimestamp_count", + ), + }, + { + m: "test_summary_with_createdtimestamp_sum", + v: 1.234, + ct: 1000, + lset: labels.FromStrings( + "__name__", "test_summary_with_createdtimestamp_sum", + ), + }, + { + m: "test_histogram_with_createdtimestamp", + help: "A histogram with a created timestamp.", + }, + { + m: "test_histogram_with_createdtimestamp", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram_with_createdtimestamp", + ct: 1000, + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + PositiveSpans: []histogram.Span{}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_with_createdtimestamp", + ), + }, + { + m: "test_gaugehistogram_with_createdtimestamp", + help: "A gauge histogram with a created timestamp.", + }, + { + m: "test_gaugehistogram_with_createdtimestamp", + typ: model.MetricTypeGaugeHistogram, + }, + { + m: "test_gaugehistogram_with_createdtimestamp", + ct: 1000, + shs: &histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + PositiveSpans: []histogram.Span{}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "test_gaugehistogram_with_createdtimestamp", + ), + }, + { + m: "test_histogram_with_native_histogram_exemplars", + help: "A histogram with native histogram exemplars.", + }, + { + m: "test_histogram_with_native_histogram_exemplars", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram_with_native_histogram_exemplars", + t: 1234568, + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + {Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156}, + }, + }, + }, + }, + { + name: "parse classic and native buckets", + parser: NewProtobufParserShim(true, labels.NewSymbolTable(), metrics), + expected: []parseResult{ + { // 0 + m: "go_build_info", + help: "Build information about the main Go module.", + }, + { // 1 + m: "go_build_info", + typ: model.MetricTypeGauge, + }, + { // 2 + m: "go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)", + v: 1, + lset: 
labels.FromStrings( + "__name__", "go_build_info", + "checksum", "", + "path", "github.com/prometheus/client_golang", + "version", "(devel)", + ), + }, + { // 3 + m: "go_memstats_alloc_bytes_total", + help: "Total number of bytes allocated, even if freed.", + }, + { // 4 + m: "go_memstats_alloc_bytes_total", + typ: model.MetricTypeCounter, + }, + { // 5 + m: "go_memstats_alloc_bytes_total", + v: 1.546544e+06, + lset: labels.FromStrings( + "__name__", "go_memstats_alloc_bytes_total", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "42"), Value: 12, HasTs: true, Ts: 1625851151233}, + }, + }, + { // 6 + m: "something_untyped", + help: "Just to test the untyped type.", + }, + { // 7 + m: "something_untyped", + typ: model.MetricTypeUnknown, + }, + { // 8 + m: "something_untyped", + t: 1234567, + v: 42, + lset: labels.FromStrings( + "__name__", "something_untyped", + ), + }, + { // 9 + m: "test_histogram", + help: "Test histogram with many buckets removed to keep it manageable in size.", + }, + { // 10 + m: "test_histogram", + typ: model.MetricTypeHistogram, + }, + { // 11 + m: "test_histogram", + t: 1234568, + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 12 + m: "test_histogram_count", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram_count", + ), + }, + { // 13 + m: "test_histogram_sum", + t: 1234568, + v: 0.0008280461746287094, + lset: labels.FromStrings( + "__name__", "test_histogram_sum", + ), + }, + { // 14 + m: "test_histogram_bucket\xffle\xff-0.0004899999999999998", + t: 1234568, + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram_bucket", + "le", "-0.0004899999999999998", + ), + }, + { // 15 + m: "test_histogram_bucket\xffle\xff-0.0003899999999999998", + t: 1234568, + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram_bucket", + "le", "-0.0003899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 16 + m: "test_histogram_bucket\xffle\xff-0.0002899999999999998", + t: 1234568, + v: 16, + lset: labels.FromStrings( + "__name__", "test_histogram_bucket", + "le", "-0.0002899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 17 + m: "test_histogram_bucket\xffle\xff+Inf", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram_bucket", + "le", "+Inf", + ), + }, + { // 18 + m: "test_gauge_histogram", + help: "Like test_histogram but as gauge histogram.", + }, + { // 19 + m: "test_gauge_histogram", + typ: model.MetricTypeGaugeHistogram, + }, + { // 20 + m: "test_gauge_histogram", + t: 1234568, + shs: &histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + 
{Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 21 + m: "test_gauge_histogram_count", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram_count", + ), + }, + { // 22 + m: "test_gauge_histogram_sum", + t: 1234568, + v: 0.0008280461746287094, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram_sum", + ), + }, + { // 23 + m: "test_gauge_histogram_bucket\xffle\xff-0.0004899999999999998", + t: 1234568, + v: 2, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram_bucket", + "le", "-0.0004899999999999998", + ), + }, + { // 24 + m: "test_gauge_histogram_bucket\xffle\xff-0.0003899999999999998", + t: 1234568, + v: 4, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram_bucket", + "le", "-0.0003899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 25 + m: "test_gauge_histogram_bucket\xffle\xff-0.0002899999999999998", + t: 1234568, + v: 16, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram_bucket", + "le", "-0.0002899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 26 + m: "test_gauge_histogram_bucket\xffle\xff+Inf", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram_bucket", + "le", "+Inf", + ), + }, + { // 27 + m: "test_float_histogram", + help: "Test float histogram with many buckets removed to keep it manageable in size.", + }, + { // 28 + m: "test_float_histogram", + typ: model.MetricTypeHistogram, + }, + { // 29 + m: "test_float_histogram", + t: 1234568, + fhs: &histogram.FloatHistogram{ + Count: 175.0, + ZeroCount: 2.0, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0}, + NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0}, + }, + lset: labels.FromStrings( + "__name__", "test_float_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 30 + m: "test_float_histogram_count", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_float_histogram_count", + ), + }, + { // 31 + m: "test_float_histogram_sum", + t: 1234568, + v: 0.0008280461746287094, + lset: labels.FromStrings( + "__name__", "test_float_histogram_sum", + ), + }, + { // 32 + m: "test_float_histogram_bucket\xffle\xff-0.0004899999999999998", + t: 1234568, + v: 2, + lset: labels.FromStrings( + "__name__", "test_float_histogram_bucket", + "le", "-0.0004899999999999998", + ), + }, + { // 33 + m: "test_float_histogram_bucket\xffle\xff-0.0003899999999999998", + t: 1234568, + v: 4, + lset: labels.FromStrings( + "__name__", "test_float_histogram_bucket", + "le", "-0.0003899999999999998", + ), + e: []exemplar.Exemplar{ + 
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 34 + m: "test_float_histogram_bucket\xffle\xff-0.0002899999999999998", + t: 1234568, + v: 16, + lset: labels.FromStrings( + "__name__", "test_float_histogram_bucket", + "le", "-0.0002899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 35 + m: "test_float_histogram_bucket\xffle\xff+Inf", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_float_histogram_bucket", + "le", "+Inf", + ), + }, + { // 36 + m: "test_gauge_float_histogram", + help: "Like test_float_histogram but as gauge histogram.", + }, + { // 37 + m: "test_gauge_float_histogram", + typ: model.MetricTypeGaugeHistogram, + }, + { // 38 + m: "test_gauge_float_histogram", + t: 1234568, + fhs: &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Count: 175.0, + ZeroCount: 2.0, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0}, + NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0}, + }, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 39 + m: "test_gauge_float_histogram_count", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram_count", + ), + }, + { // 40 + m: "test_gauge_float_histogram_sum", + t: 1234568, + v: 0.0008280461746287094, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram_sum", + ), + }, + { // 41 + m: "test_gauge_float_histogram_bucket\xffle\xff-0.0004899999999999998", + t: 1234568, + v: 2, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram_bucket", + "le", "-0.0004899999999999998", + ), + }, + { // 42 + m: "test_gauge_float_histogram_bucket\xffle\xff-0.0003899999999999998", + t: 1234568, + v: 4, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram_bucket", + "le", "-0.0003899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 43 + m: "test_gauge_float_histogram_bucket\xffle\xff-0.0002899999999999998", + t: 1234568, + v: 16, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram_bucket", + "le", "-0.0002899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 44 + m: "test_gauge_float_histogram_bucket\xffle\xff+Inf", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram_bucket", + "le", "+Inf", + ), + }, + { // 45 + m: "test_histogram2", + help: "Similar histogram as before but now without sparse buckets.", + }, + { // 46 + m: "test_histogram2", + typ: model.MetricTypeHistogram, + }, + { // 47 + m: "test_histogram2_count", + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram2_count", + ), + }, + { // 48 + m: "test_histogram2_sum", + v: 0.000828, + lset: labels.FromStrings( + "__name__", "test_histogram2_sum", + ), + }, + { // 49 + m: 
"test_histogram2_bucket\xffle\xff-0.00048", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "-0.00048", + ), + }, + { // 50 + m: "test_histogram2_bucket\xffle\xff-0.00038", + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "-0.00038", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146}, + }, + }, + { // 51 + m: "test_histogram2_bucket\xffle\xff1.0", + v: 16, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "1.0", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.000295, HasTs: false}, + }, + }, + { // 52 + m: "test_histogram2_bucket\xffle\xff+Inf", + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "+Inf", + ), + }, + { // 53 + m: "test_histogram_family", + help: "Test histogram metric family with two very simple histograms.", + }, + { // 54 + m: "test_histogram_family", + typ: model.MetricTypeHistogram, + }, + { // 55 + m: "test_histogram_family\xfffoo\xffbar", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 5, + Sum: 12.1, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 8, Length: 2}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{2, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "foo", "bar", + ), + }, + { // 56 + m: "test_histogram_family_count\xfffoo\xffbar", + v: 5, + lset: labels.FromStrings( + "__name__", "test_histogram_family_count", + "foo", "bar", + ), + }, + { // 57 + m: "test_histogram_family_sum\xfffoo\xffbar", + v: 12.1, + lset: labels.FromStrings( + "__name__", "test_histogram_family_sum", + "foo", "bar", + ), + }, + { // 58 + m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff1.1", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "bar", + "le", "1.1", + ), + }, + { // 59 + m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff2.2", + v: 3, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "bar", + "le", "2.2", + ), + }, + { // 60 + m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff+Inf", + v: 5, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "bar", + "le", "+Inf", + ), + }, + { // 61 + m: "test_histogram_family\xfffoo\xffbaz", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 6, + Sum: 13.1, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 8, Length: 2}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{1, 4}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "foo", "baz", + ), + }, + { // 62 + m: "test_histogram_family_count\xfffoo\xffbaz", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram_family_count", + "foo", "baz", + ), + }, + { // 63 + m: "test_histogram_family_sum\xfffoo\xffbaz", + v: 13.1, + lset: labels.FromStrings( + "__name__", "test_histogram_family_sum", + "foo", "baz", + ), + }, + { // 64 + m: "test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff1.1", + v: 1, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "baz", + "le", "1.1", + ), + }, + { // 65 + m: "test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff2.2", + v: 5, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "baz", + "le", "2.2", + ), + }, + { // 66 + m: 
"test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff+Inf", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "baz", + "le", "+Inf", + ), + }, + { // 67 + m: "test_float_histogram_with_zerothreshold_zero", + help: "Test float histogram with a zero threshold of zero.", + }, + { // 68 + m: "test_float_histogram_with_zerothreshold_zero", + typ: model.MetricTypeHistogram, + }, + { // 69 + m: "test_float_histogram_with_zerothreshold_zero", + fhs: &histogram.FloatHistogram{ + Count: 5.0, + Sum: 12.1, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 8, Length: 2}, + }, + PositiveBuckets: []float64{2.0, 3.0}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "test_float_histogram_with_zerothreshold_zero", + ), + }, + { // 70 + m: "rpc_durations_seconds", + help: "RPC latency distributions.", + }, + { // 71 + m: "rpc_durations_seconds", + typ: model.MetricTypeSummary, + }, + { // 72 + m: "rpc_durations_seconds_count\xffservice\xffexponential", + v: 262, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds_count", + "service", "exponential", + ), + }, + { // 73 + m: "rpc_durations_seconds_sum\xffservice\xffexponential", + v: 0.00025551262820703587, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds_sum", + "service", "exponential", + ), + }, + { // 74 + m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.5", + v: 6.442786329648548e-07, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.5", + "service", "exponential", + ), + }, + { // 75 + m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.9", + v: 1.9435742936658396e-06, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.9", + "service", "exponential", + ), + }, + { // 76 + m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.99", + v: 4.0471608667037015e-06, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.99", + "service", "exponential", + ), + }, + { // 77 + m: "without_quantiles", + help: "A summary without quantiles.", + }, + { // 78 + m: "without_quantiles", + typ: model.MetricTypeSummary, + }, + { // 79 + m: "without_quantiles_count", + v: 42, + lset: labels.FromStrings( + "__name__", "without_quantiles_count", + ), + }, + { // 80 + m: "without_quantiles_sum", + v: 1.234, + lset: labels.FromStrings( + "__name__", "without_quantiles_sum", + ), + }, + { // 78 + m: "empty_histogram", + help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram.", + }, + { // 79 + m: "empty_histogram", + typ: model.MetricTypeHistogram, + }, + { // 80 + m: "empty_histogram", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + PositiveSpans: []histogram.Span{}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "empty_histogram", + ), + }, + { // 81 + m: "test_counter_with_createdtimestamp", + help: "A counter with a created timestamp.", + }, + { // 82 + m: "test_counter_with_createdtimestamp", + typ: model.MetricTypeCounter, + }, + { // 83 + m: "test_counter_with_createdtimestamp", + v: 42, + ct: 1000, + lset: labels.FromStrings( + "__name__", "test_counter_with_createdtimestamp", + ), + }, + { // 84 + m: "test_summary_with_createdtimestamp", + help: "A summary with a created timestamp.", + }, + { // 85 + m: "test_summary_with_createdtimestamp", + typ: 
model.MetricTypeSummary, + }, + { // 86 + m: "test_summary_with_createdtimestamp_count", + v: 42, + ct: 1000, + lset: labels.FromStrings( + "__name__", "test_summary_with_createdtimestamp_count", + ), + }, + { // 87 + m: "test_summary_with_createdtimestamp_sum", + v: 1.234, + ct: 1000, + lset: labels.FromStrings( + "__name__", "test_summary_with_createdtimestamp_sum", + ), + }, + { // 88 + m: "test_histogram_with_createdtimestamp", + help: "A histogram with a created timestamp.", + }, + { // 89 + m: "test_histogram_with_createdtimestamp", + typ: model.MetricTypeHistogram, + }, + { // 90 + m: "test_histogram_with_createdtimestamp", + ct: 1000, + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + PositiveSpans: []histogram.Span{}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_with_createdtimestamp", + ), + }, + { // 91 + m: "test_gaugehistogram_with_createdtimestamp", + help: "A gauge histogram with a created timestamp.", + }, + { // 92 + m: "test_gaugehistogram_with_createdtimestamp", + typ: model.MetricTypeGaugeHistogram, + }, + { // 93 + m: "test_gaugehistogram_with_createdtimestamp", + ct: 1000, + shs: &histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + PositiveSpans: []histogram.Span{}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "test_gaugehistogram_with_createdtimestamp", + ), + }, + { // 94 + m: "test_histogram_with_native_histogram_exemplars", + help: "A histogram with native histogram exemplars.", + }, + { // 95 + m: "test_histogram_with_native_histogram_exemplars", + typ: model.MetricTypeHistogram, + }, + { // 96 + m: "test_histogram_with_native_histogram_exemplars", + t: 1234568, + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + {Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156}, + }, + }, + { // 97 + m: "test_histogram_with_native_histogram_exemplars_count", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars_count", + ), + }, + { // 98 + m: "test_histogram_with_native_histogram_exemplars_sum", + t: 1234568, + v: 0.0008280461746287094, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars_sum", + ), + }, + { // 99 + m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0004899999999999998", + t: 1234568, + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars_bucket", + "le", "-0.0004899999999999998", + ), + }, + { // 100 + m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0003899999999999998", + t: 1234568, + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars_bucket", + "le", "-0.0003899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: 
-0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 101 + m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0002899999999999998", + t: 1234568, + v: 16, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars_bucket", + "le", "-0.0002899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 102 + m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff+Inf", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars_bucket", + "le", "+Inf", + ), + }, + }, + }, + } + + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + var ( + i int + res labels.Labels + p = scenario.parser + exp = scenario.expected + ) + + for { + et, err := p.Next() + if errors.Is(err, io.EOF) { + break + } + require.NoError(t, err) + + switch et { + case promtextparse.EntrySeries: + m, ts, v := p.Series() + + var e exemplar.Exemplar + p.Metric(&res) + eFound := p.Exemplar(&e) + ct := p.CreatedTimestamp() + require.Equal(t, exp[i].m, string(m), "i: %d", i) + if ts != nil { + require.Equal(t, exp[i].t, *ts, "i: %d", i) + } else { + require.Equal(t, int64(0), exp[i].t, "i: %d", i) + } + require.Equal(t, exp[i].v, v, "i: %d", i) // nolint + testutil.RequireEqual(t, exp[i].lset, res, "i: %d", i) + if len(exp[i].e) == 0 { + require.False(t, eFound, "i: %d", i) + } else { + require.True(t, eFound, "i: %d", i) + testutil.RequireEqual(t, exp[i].e[0], e, "i: %d", i) + require.False(t, p.Exemplar(&e), "too many exemplars returned, i: %d", i) + } + if exp[i].ct != 0 { + require.NotNilf(t, ct, "i: %d", i) + require.Equal(t, exp[i].ct, *ct, "i: %d", i) + } else { + require.Nilf(t, ct, "i: %d", i) + } + + case promtextparse.EntryHistogram: + m, ts, shs, fhs := p.Histogram() + p.Metric(&res) + require.Equal(t, exp[i].m, string(m), "i: %d", i) + if ts != nil { + require.Equal(t, exp[i].t, *ts, "i: %d", i) + } else { + require.Equal(t, int64(0), exp[i].t, "i: %d", i) + } + testutil.RequireEqual(t, exp[i].lset, res, "i: %d", i) + require.Equal(t, exp[i].m, string(m), "i: %d", i) + if shs != nil { + require.Equal(t, exp[i].shs, shs, "i: %d", i) + } else { + require.Equal(t, exp[i].fhs, fhs, "i: %d", i) + } + j := 0 + for e := (exemplar.Exemplar{}); p.Exemplar(&e); j++ { + testutil.RequireEqual(t, exp[i].e[j], e, "i: %d", i) + e = exemplar.Exemplar{} + } + require.Len(t, exp[i].e, j, "not enough exemplars found, i: %d", i) + + case promtextparse.EntryType: + m, typ := p.Type() + require.Equal(t, exp[i].m, string(m), "i: %d", i) + require.Equal(t, exp[i].typ, typ, "i: %d", i) + + case promtextparse.EntryHelp: + m, h := p.Help() + require.Equal(t, exp[i].m, string(m), "i: %d", i) + require.Equal(t, exp[i].help, string(h), "i: %d", i) + + case promtextparse.EntryUnit: + m, u := p.Unit() + require.Equal(t, exp[i].m, string(m), "i: %d", i) + require.Equal(t, exp[i].unit, string(u), "i: %d", i) + + case promtextparse.EntryComment: + require.Equal(t, exp[i].comment, string(p.Comment()), "i: %d", i) + case promtextparse.EntryInvalid: + t.Fatalf("unexpected invalid entry") + } + + i++ + } + require.Len(t, exp, i) + }) + } +} diff --git a/pkg/promotel/internal/prometheus/textparse/protobufparse.go b/pkg/promotel/internal/prometheus/textparse/protobufparse.go new file mode 100644 index 0000000000..bf20534ce9 --- /dev/null +++ b/pkg/promotel/internal/prometheus/textparse/protobufparse.go @@ -0,0 +1,642 @@ 
+package textparse + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "strings" + "unicode/utf8" + + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + promtextparse "github.com/prometheus/prometheus/model/textparse" + + dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" +) + +// ProtobufParser is a very inefficient way of unmarshaling the old Prometheus +// protobuf format and then presenting it as if it were parsed by a +// Prometheus-2-style text parser. This is only done so that we can easily plug +// in the protobuf format into Prometheus 2. For future use (with the final +// format that will be used for native histograms), we have to revisit the +// parsing. A lot of the efficiency tricks of the Prometheus-2-style parsing +// could be used in a similar fashion (byte-slice pointers into the raw +// payload), which requires some hand-coded protobuf handling. But the current +// parsers all expect the full series name (metric name plus label pairs) as one +// string, which is not how things are represented in the protobuf format. If +// the re-arrangement work is actually causing problems (which has to be seen), +// that expectation needs to be changed. +type ProtobufParser struct { + in []byte // The input to parse. + inPos int // Position within the input. + metricPos int // Position within Metric slice. + // fieldPos is the position within a Summary or (legacy) Histogram. -2 + // is the count. -1 is the sum. Otherwise it is the index within + // quantiles/buckets. + fieldPos int + fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram remain to be processed. + redoClassic bool // true after parsing a native histogram if we need to parse it again as a classic histogram. + // exemplarPos is the position within the exemplars slice of a native histogram. + exemplarPos int + + // exemplarReturned is set to true each time an exemplar has been + // returned, and set back to false upon each Next() call. + exemplarReturned bool + + // state is marked by the entry we are processing. EntryInvalid implies + // that we have to decode the next MetricFamily. + state promtextparse.Entry + + builder labels.ScratchBuilder // held here to reduce allocations when building Labels + + mf *dto.MetricFamily + + // Whether to also parse a classic histogram that is also present as a + // native histogram. + parseClassicHistograms bool + + // The following are just shenanigans to satisfy the Parser interface. + metricBytes *bytes.Buffer // A somewhat fluid representation of the current metric. + + readDelimitedFunc func([]byte, *dto.MetricFamily) (int, error) +} + +// NewProtobufParser returns a parser for the payload in the byte slice. +func NewProtobufParser(b []byte, parseClassicHistograms bool, st *labels.SymbolTable) promtextparse.Parser { + return &ProtobufParser{ + in: b, + state: promtextparse.EntryInvalid, + mf: &dto.MetricFamily{}, + metricBytes: &bytes.Buffer{}, + parseClassicHistograms: parseClassicHistograms, + builder: labels.NewScratchBuilderWithSymbolTable(st, 16), + } +} + +// Series returns the bytes of a series with a simple float64 as a +// value, the timestamp if set, and the value of the current sample.
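+// For a summary or classic histogram, successive calls step through the +// synthetic fields selected by fieldPos: a hypothetical summary family "s" +// with two quantiles surfaces as "s_count", "s_sum", and then one +// "s{quantile=...}" series per quantile (see getMagicName and getMagicLabel).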
+func (p *ProtobufParser) Series() ([]byte, *int64, float64) { + var ( + m = p.mf.GetMetric()[p.metricPos] + ts = m.GetTimestampMs() + v float64 + ) + switch p.mf.GetType() { + case dto.MetricType_COUNTER: + v = m.GetCounter().GetValue() + case dto.MetricType_GAUGE: + v = m.GetGauge().GetValue() + case dto.MetricType_UNTYPED: + v = m.GetUntyped().GetValue() + case dto.MetricType_SUMMARY: + s := m.GetSummary() + switch p.fieldPos { + case -2: + v = float64(s.GetSampleCount()) + case -1: + v = s.GetSampleSum() + // Need to detect summaries without quantile here. + if len(s.GetQuantile()) == 0 { + p.fieldsDone = true + } + default: + v = s.GetQuantile()[p.fieldPos].GetValue() + } + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: + // This should only happen for a classic histogram. + h := m.GetHistogram() + switch p.fieldPos { + case -2: + v = h.GetSampleCountFloat() + if v == 0 { + v = float64(h.GetSampleCount()) + } + case -1: + v = h.GetSampleSum() + default: + bb := h.GetBucket() + if p.fieldPos >= len(bb) { + v = h.GetSampleCountFloat() + if v == 0 { + v = float64(h.GetSampleCount()) + } + } else { + v = bb[p.fieldPos].GetCumulativeCountFloat() + if v == 0 { + v = float64(bb[p.fieldPos].GetCumulativeCount()) + } + } + } + default: + panic("encountered unexpected metric type, this is a bug") + } + if ts != 0 { + return p.metricBytes.Bytes(), &ts, v + } + // TODO(beorn7): We assume here that ts==0 means no timestamp. That's + // not true in general, but proto3 originally has no distinction between + // unset and default. At a later stage, the `optional` keyword was + // (re-)introduced in proto3, but gogo-protobuf never got updated to + // support it. (Note that setting `[(gogoproto.nullable) = true]` for + // the `timestamp_ms` field doesn't help, either.) We plan to migrate + // away from gogo-protobuf to an actively maintained protobuf + // implementation. Once that's done, we can simply use the `optional` + // keyword and check for the unset state explicitly. + return p.metricBytes.Bytes(), nil, v +} + +// Histogram returns the bytes of a series with a native histogram as a value, +// the timestamp if set, and the native histogram in the current sample. +// +// The Compact method is called before returning the Histogram (or FloatHistogram). +// +// If the SampleCountFloat or the ZeroCountFloat in the proto message is > 0, +// the histogram is parsed and returned as a FloatHistogram and nil is returned +// as the (integer) Histogram return value. Otherwise, it is parsed and returned +// as an (integer) Histogram and nil is returned as the FloatHistogram return +// value. +func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { + var ( + m = p.mf.GetMetric()[p.metricPos] + ts = m.GetTimestampMs() + h = m.GetHistogram() + ) + if p.parseClassicHistograms && len(h.GetBucket()) > 0 { + p.redoClassic = true + } + if h.GetSampleCountFloat() > 0 || h.GetZeroCountFloat() > 0 { + // It is a float histogram. 
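+ // Sketch of the field mapping: the float flavor carries absolute bucket + // populations (GetPositiveCount/GetNegativeCount), while the integer + // flavor below uses deltas (GetPositiveDelta/GetNegativeDelta).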
+ fh := histogram.FloatHistogram{ + Count: h.GetSampleCountFloat(), + Sum: h.GetSampleSum(), + ZeroThreshold: h.GetZeroThreshold(), + ZeroCount: h.GetZeroCountFloat(), + Schema: h.GetSchema(), + PositiveSpans: make([]histogram.Span, len(h.GetPositiveSpan())), + PositiveBuckets: h.GetPositiveCount(), + NegativeSpans: make([]histogram.Span, len(h.GetNegativeSpan())), + NegativeBuckets: h.GetNegativeCount(), + } + for i, span := range h.GetPositiveSpan() { + fh.PositiveSpans[i].Offset = span.GetOffset() + fh.PositiveSpans[i].Length = span.GetLength() + } + for i, span := range h.GetNegativeSpan() { + fh.NegativeSpans[i].Offset = span.GetOffset() + fh.NegativeSpans[i].Length = span.GetLength() + } + if p.mf.GetType() == dto.MetricType_GAUGE_HISTOGRAM { + fh.CounterResetHint = histogram.GaugeType + } + fh.Compact(0) + if ts != 0 { + return p.metricBytes.Bytes(), &ts, nil, &fh + } + // Nasty hack: Assume that ts==0 means no timestamp. That's not true in + // general, but proto3 has no distinction between unset and + // default. Need to avoid in the final format. + return p.metricBytes.Bytes(), nil, nil, &fh + } + + sh := histogram.Histogram{ + Count: h.GetSampleCount(), + Sum: h.GetSampleSum(), + ZeroThreshold: h.GetZeroThreshold(), + ZeroCount: h.GetZeroCount(), + Schema: h.GetSchema(), + PositiveSpans: make([]histogram.Span, len(h.GetPositiveSpan())), + PositiveBuckets: h.GetPositiveDelta(), + NegativeSpans: make([]histogram.Span, len(h.GetNegativeSpan())), + NegativeBuckets: h.GetNegativeDelta(), + } + for i, span := range h.GetPositiveSpan() { + sh.PositiveSpans[i].Offset = span.GetOffset() + sh.PositiveSpans[i].Length = span.GetLength() + } + for i, span := range h.GetNegativeSpan() { + sh.NegativeSpans[i].Offset = span.GetOffset() + sh.NegativeSpans[i].Length = span.GetLength() + } + if p.mf.GetType() == dto.MetricType_GAUGE_HISTOGRAM { + sh.CounterResetHint = histogram.GaugeType + } + sh.Compact(0) + if ts != 0 { + return p.metricBytes.Bytes(), &ts, &sh, nil + } + return p.metricBytes.Bytes(), nil, &sh, nil +} + +// Help returns the metric name and help text in the current entry. +// Must only be called after Next returned a help entry. +// The returned byte slices become invalid after the next call to Next. +func (p *ProtobufParser) Help() ([]byte, []byte) { + return p.metricBytes.Bytes(), []byte(p.mf.GetHelp()) +} + +// Type returns the metric name and type in the current entry. +// Must only be called after Next returned a type entry. +// The returned byte slices become invalid after the next call to Next. +func (p *ProtobufParser) Type() ([]byte, model.MetricType) { + n := p.metricBytes.Bytes() + switch p.mf.GetType() { + case dto.MetricType_COUNTER: + return n, model.MetricTypeCounter + case dto.MetricType_GAUGE: + return n, model.MetricTypeGauge + case dto.MetricType_HISTOGRAM: + return n, model.MetricTypeHistogram + case dto.MetricType_GAUGE_HISTOGRAM: + return n, model.MetricTypeGaugeHistogram + case dto.MetricType_SUMMARY: + return n, model.MetricTypeSummary + default: + return n, model.MetricTypeUnknown + } +} + +// Unit returns the metric unit in the current entry. +// Must only be called after Next returned a unit entry. +// The returned byte slices become invalid after the next call to Next. +func (p *ProtobufParser) Unit() ([]byte, []byte) { + return p.metricBytes.Bytes(), []byte(p.mf.GetUnit()) +} + +// Comment always returns nil because comments aren't supported by the protobuf +// format. 
+func (p *ProtobufParser) Comment() []byte { + return nil +} + +// Metric writes the labels of the current sample into the passed labels. +// It returns the string from which the metric was parsed. +func (p *ProtobufParser) Metric(l *labels.Labels) string { + p.builder.Reset() + p.builder.Add(labels.MetricName, p.getMagicName()) + + for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() { + p.builder.Add(lp.GetName(), lp.GetValue()) + } + if needed, name, value := p.getMagicLabel(); needed { + p.builder.Add(name, value) + } + + // Sort labels to maintain the sorted labels invariant. + p.builder.Sort() + *l = p.builder.Labels() + + return p.metricBytes.String() +} + +// Exemplar writes the exemplar of the current sample into the passed +// exemplar. It reports whether an exemplar exists. In case of a native +// histogram, the exemplars in the native histogram will be returned. +// If this field is empty, the classic bucket section is still used for exemplars. +// To ingest all exemplars, call the Exemplar method repeatedly until it returns false. +func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { + if p.exemplarReturned && p.state == promtextparse.EntrySeries { + // We only ever return one exemplar per (non-native-histogram) series. + return false + } + m := p.mf.GetMetric()[p.metricPos] + var exProto *dto.Exemplar + switch p.mf.GetType() { + case dto.MetricType_COUNTER: + exProto = m.GetCounter().GetExemplar() + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: + isClassic := p.state == promtextparse.EntrySeries + if !isClassic && len(m.GetHistogram().GetExemplars()) > 0 { + exs := m.GetHistogram().GetExemplars() + for p.exemplarPos < len(exs) { + exProto = exs[p.exemplarPos] + p.exemplarPos++ + if exProto != nil && exProto.GetTimestamp() != nil { + break + } + } + if exProto != nil && exProto.GetTimestamp() == nil { + return false + } + } else { + bb := m.GetHistogram().GetBucket() + if p.fieldPos < 0 { + if isClassic { + return false // At _count or _sum. + } + p.fieldPos = 0 // Start at 1st bucket for native histograms. + } + for p.fieldPos < len(bb) { + exProto = bb[p.fieldPos].GetExemplar() + if isClassic { + break + } + p.fieldPos++ + // We deliberately drop exemplars with no timestamp only for native histograms. + if exProto != nil && (isClassic || exProto.GetTimestamp() != nil) { + break // Found a classic histogram exemplar or a native histogram exemplar with a timestamp. + } + } + // If the last exemplar for native histograms has no timestamp, ignore it. + if !isClassic && exProto.GetTimestamp() == nil { + return false + } + } + default: + return false + } + if exProto == nil { + return false + } + ex.Value = exProto.GetValue() + if ts := exProto.GetTimestamp(); ts != nil { + ex.HasTs = true + ex.Ts = ts.GetSeconds()*1000 + int64(ts.GetNanos()/1_000_000) + } + p.builder.Reset() + for _, lp := range exProto.GetLabel() { + p.builder.Add(lp.GetName(), lp.GetValue()) + } + p.builder.Sort() + ex.Labels = p.builder.Labels() + p.exemplarReturned = true + return true +} + +// CreatedTimestamp returns the created timestamp (CT) or nil if the CT is not +// present or invalid (e.g. a negative timestamp value) on counters, summaries +// or histograms.
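+// The proto timestamp is converted to Unix milliseconds: for example, a +// created_timestamp of {seconds: 1, nanos: 1} comes back as 1000, which is +// what the ct: 1000 expectations in the accompanying tests rely on.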
+func (p *ProtobufParser) CreatedTimestamp() *int64 { + var ct *types.Timestamp + switch p.mf.GetType() { + case dto.MetricType_COUNTER: + ct = p.mf.GetMetric()[p.metricPos].GetCounter().GetCreatedTimestamp() + case dto.MetricType_SUMMARY: + ct = p.mf.GetMetric()[p.metricPos].GetSummary().GetCreatedTimestamp() + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: + ct = p.mf.GetMetric()[p.metricPos].GetHistogram().GetCreatedTimestamp() + default: + } + ctAsTime, err := types.TimestampFromProto(ct) + if err != nil { + // An error means ct == nil or an invalid timestamp, which we silently ignore. + return nil + } + ctMillis := ctAsTime.UnixMilli() + return &ctMillis +} + +// Next advances the parser to the next "sample" (emulating the behavior of a +// text format parser). It returns (EntryInvalid, io.EOF) if no samples were +// read. +func (p *ProtobufParser) Next() (promtextparse.Entry, error) { + p.exemplarReturned = false + switch p.state { + case promtextparse.EntryInvalid: + p.metricPos = 0 + p.fieldPos = -2 + n, err := p.readDelimited(p.in[p.inPos:], p.mf) + p.inPos += n + if err != nil { + return p.state, err + } + + // Skip empty metric families. + if len(p.mf.GetMetric()) == 0 { + return p.Next() + } + + // We are at the beginning of a metric family. Put only the name + // into metricBytes and validate only name, help, and type for now. + name := p.mf.GetName() + if !model.IsValidMetricName(model.LabelValue(name)) { + return promtextparse.EntryInvalid, fmt.Errorf("invalid metric name: %s", name) + } + if help := p.mf.GetHelp(); !utf8.ValidString(help) { + return promtextparse.EntryInvalid, fmt.Errorf("invalid help for metric %q: %s", name, help) + } + switch p.mf.GetType() { + case dto.MetricType_COUNTER, + dto.MetricType_GAUGE, + dto.MetricType_HISTOGRAM, + dto.MetricType_GAUGE_HISTOGRAM, + dto.MetricType_SUMMARY, + dto.MetricType_UNTYPED: + // All good.
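+ // (UNTYPED is surfaced later as model.MetricTypeUnknown by Type.)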
+ default: + return promtextparse.EntryInvalid, fmt.Errorf("unknown metric type for metric %q: %s", name, p.mf.GetType()) + } + unit := p.mf.GetUnit() + if len(unit) > 0 { + if p.mf.GetType() == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { + if !strings.HasSuffix(name[:len(name)-6], unit) || len(name)-6 < len(unit)+1 || name[len(name)-6-len(unit)-1] != '_' { + return promtextparse.EntryInvalid, fmt.Errorf("unit %q not a suffix of counter %q", unit, name) + } + } else if !strings.HasSuffix(name, unit) || len(name) < len(unit)+1 || name[len(name)-len(unit)-1] != '_' { + return promtextparse.EntryInvalid, fmt.Errorf("unit %q not a suffix of metric %q", unit, name) + } + } + p.metricBytes.Reset() + p.metricBytes.WriteString(name) + + p.state = promtextparse.EntryHelp + case promtextparse.EntryHelp: + p.state = promtextparse.EntryType + case promtextparse.EntryType: + t := p.mf.GetType() + if (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) && + isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) { + p.state = promtextparse.EntryHistogram + } else { + p.state = promtextparse.EntrySeries + } + if err := p.updateMetricBytes(); err != nil { + return promtextparse.EntryInvalid, err + } + case promtextparse.EntryHistogram, promtextparse.EntrySeries: + if p.redoClassic { + p.redoClassic = false + p.state = promtextparse.EntrySeries + p.fieldPos = -3 // Incremented to -2 (the count) right below. + p.fieldsDone = false + } + t := p.mf.GetType() + if p.state == promtextparse.EntrySeries && !p.fieldsDone && + (t == dto.MetricType_SUMMARY || + t == dto.MetricType_HISTOGRAM || + t == dto.MetricType_GAUGE_HISTOGRAM) { + p.fieldPos++ + } else { + p.metricPos++ + p.fieldPos = -2 + p.fieldsDone = false + // If this is a metric family containing native + // histograms, we have to switch back to native + // histograms after parsing a classic histogram. + if p.state == promtextparse.EntrySeries && + (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) && + isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) { + p.state = promtextparse.EntryHistogram + } + } + if p.metricPos >= len(p.mf.GetMetric()) { + p.state = promtextparse.EntryInvalid + return p.Next() + } + if err := p.updateMetricBytes(); err != nil { + return promtextparse.EntryInvalid, err + } + default: + return promtextparse.EntryInvalid, fmt.Errorf("invalid protobuf parsing state: %d", p.state) + } + return p.state, nil +} + +func (p *ProtobufParser) updateMetricBytes() error { + b := p.metricBytes + b.Reset() + b.WriteString(p.getMagicName()) + for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() { + b.WriteByte(model.SeparatorByte) + n := lp.GetName() + if !model.LabelName(n).IsValid() { + return fmt.Errorf("invalid label name: %s", n) + } + b.WriteString(n) + b.WriteByte(model.SeparatorByte) + v := lp.GetValue() + if !utf8.ValidString(v) { + return fmt.Errorf("invalid label value: %s", v) + } + b.WriteString(v) + } + if needed, n, v := p.getMagicLabel(); needed { + b.WriteByte(model.SeparatorByte) + b.WriteString(n) + b.WriteByte(model.SeparatorByte) + b.WriteString(v) + } + return nil +} + +// getMagicName usually just returns p.mf.GetName() but adds a magic suffix +// ("_count", "_sum", "_bucket") if needed according to the current parser +// state.
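+// For instance, while the classic view of a histogram family named "h" is +// being emitted, fieldPos -2 yields "h_count", -1 yields "h_sum", and any +// bucket position yields "h_bucket" ("h" is just a placeholder name here).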
+func (p *ProtobufParser) getMagicName() string { + t := p.mf.GetType() + if p.state == promtextparse.EntryHistogram || (t != dto.MetricType_HISTOGRAM && t != dto.MetricType_GAUGE_HISTOGRAM && t != dto.MetricType_SUMMARY) { + return p.mf.GetName() + } + if p.fieldPos == -2 { + return p.mf.GetName() + "_count" + } + if p.fieldPos == -1 { + return p.mf.GetName() + "_sum" + } + if t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM { + return p.mf.GetName() + "_bucket" + } + return p.mf.GetName() +} + +// getMagicLabel returns whether a magic label ("quantile" or "le") is needed and, if +// so, its name and value. It also sets p.fieldsDone if applicable. +func (p *ProtobufParser) getMagicLabel() (bool, string, string) { + if p.state == promtextparse.EntryHistogram || p.fieldPos < 0 { + return false, "", "" + } + switch p.mf.GetType() { + case dto.MetricType_SUMMARY: + qq := p.mf.GetMetric()[p.metricPos].GetSummary().GetQuantile() + q := qq[p.fieldPos] + p.fieldsDone = p.fieldPos == len(qq)-1 + return true, model.QuantileLabel, formatOpenMetricsFloat(q.GetQuantile()) + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: + bb := p.mf.GetMetric()[p.metricPos].GetHistogram().GetBucket() + if p.fieldPos >= len(bb) { + p.fieldsDone = true + return true, model.BucketLabel, "+Inf" + } + b := bb[p.fieldPos] + p.fieldsDone = math.IsInf(b.GetUpperBound(), +1) + return true, model.BucketLabel, formatOpenMetricsFloat(b.GetUpperBound()) + default: + return false, "", "" + } +} + +func (p *ProtobufParser) readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) { + if p.readDelimitedFunc != nil { + return p.readDelimitedFunc(b, mf) + } + return readDelimited(b, mf) +} + +var errInvalidVarint = errors.New("protobufparse: invalid varint encountered") + +// readDelimited is essentially doing what ReadDelimited in +// github.com/matttproud/golang_protobuf_extensions/pbutil is doing, but it is +// specific to a MetricFamily, utilizes the more efficient gogo-protobuf +// unmarshaling, and acts on a byte slice directly without any additional +// staging buffers. +func readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) { + if len(b) == 0 { + return 0, io.EOF + } + messageLength, varIntLength := proto.DecodeVarint(b) + if varIntLength == 0 || varIntLength > binary.MaxVarintLen32 { + return 0, errInvalidVarint + } + totalLength := varIntLength + int(messageLength) //nolint + if totalLength > len(b) { + return 0, fmt.Errorf("protobufparse: insufficient length of buffer, expected at least %d bytes, got %d bytes", totalLength, len(b)) + } + mf.Reset() + return totalLength, mf.Unmarshal(b[varIntLength:totalLength]) +} + +// formatOpenMetricsFloat works like the usual Go string formatting of a float +// but appends ".0" if the resulting number would otherwise contain neither a +// "." nor an "e". +func formatOpenMetricsFloat(f float64) string { + // A few common cases hardcoded. + switch { + case f == 1: + return "1.0" + case f == 0: + return "0.0" + case f == -1: + return "-1.0" + case math.IsNaN(f): + return "NaN" + case math.IsInf(f, +1): + return "+Inf" + case math.IsInf(f, -1): + return "-Inf" + } + s := fmt.Sprint(f) + if strings.ContainsAny(s, "e.") { + return s + } + return s + ".0" +} + +// isNativeHistogram returns false iff the provided histogram has no spans at +// all (neither positive nor negative) and a zero threshold of 0 and a zero +// count of 0.
In principle, this could still be meant to be a native histogram +// with a zero threshold of 0 and no observations yet. In that case, +// instrumentation libraries should add a "no-op" span (e.g. length zero, offset +// zero) to signal that the histogram is meant to be parsed as a native +// histogram. Failing to do so will cause Prometheus to parse it as a classic +// histogram as long as no observations have happened. +func isNativeHistogram(h *dto.Histogram) bool { + return len(h.GetPositiveSpan()) > 0 || + len(h.GetNegativeSpan()) > 0 || + h.GetZeroThreshold() > 0 || + h.GetZeroCount() > 0 +} diff --git a/pkg/promotel/internal/prometheus/textparse/protobufparse_test.go b/pkg/promotel/internal/prometheus/textparse/protobufparse_test.go new file mode 100644 index 0000000000..a84b66044f --- /dev/null +++ b/pkg/promotel/internal/prometheus/textparse/protobufparse_test.go @@ -0,0 +1,2277 @@ +package textparse + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "testing" + + "github.com/gogo/protobuf/proto" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + promtextparse "github.com/prometheus/prometheus/model/textparse" + "github.com/prometheus/prometheus/util/testutil" + "github.com/stretchr/testify/require" + + dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" +) + +func createTestProtoBuf(t *testing.T) ([]string, *bytes.Buffer) { + testMetricFamilies := []string{ + `name: "go_build_info" +help: "Build information about the main Go module." +type: GAUGE +metric: < + label: < + name: "checksum" + value: "" + > + label: < + name: "path" + value: "github.com/prometheus/client_golang" + > + label: < + name: "version" + value: "(devel)" + > + gauge: < + value: 1 + > +> + +`, + `name: "go_memstats_alloc_bytes_total" +help: "Total number of bytes allocated, even if freed." +type: COUNTER +unit: "bytes" +metric: < + counter: < + value: 1.546544e+06 + exemplar: < + label: < + name: "dummyID" + value: "42" + > + value: 12 + timestamp: < + seconds: 1625851151 + nanos: 233181499 + > + > + > +> + +`, + `name: "something_untyped" +help: "Just to test the untyped type." +type: UNTYPED +metric: < + untyped: < + value: 42 + > + timestamp_ms: 1234567 +> + +`, + `name: "test_histogram" +help: "Test histogram with many buckets removed to keep it manageable in size." 
+type: HISTOGRAM +metric: < + histogram: < + sample_count: 175 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count: 2 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.0003899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "59727" + > + value: -0.00039 + timestamp: < + seconds: 1625851155 + nanos: 146848499 + > + > + > + bucket: < + cumulative_count: 16 + upper_bound: -0.0002899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "5617" + > + value: -0.00029 + > + > + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count: 2 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_delta: 1 + negative_delta: 3 + negative_delta: -2 + negative_delta: -1 + negative_delta: 1 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_delta: 1 + positive_delta: 2 + positive_delta: -1 + positive_delta: -1 + > + timestamp_ms: 1234568 +> + +`, + `name: "test_gauge_histogram" +help: "Like test_histogram but as gauge histogram." +type: GAUGE_HISTOGRAM +metric: < + histogram: < + sample_count: 175 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count: 2 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.0003899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "59727" + > + value: -0.00039 + timestamp: < + seconds: 1625851155 + nanos: 146848499 + > + > + > + bucket: < + cumulative_count: 16 + upper_bound: -0.0002899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "5617" + > + value: -0.00029 + > + > + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count: 2 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_delta: 1 + negative_delta: 3 + negative_delta: -2 + negative_delta: -1 + negative_delta: 1 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_delta: 1 + positive_delta: 2 + positive_delta: -1 + positive_delta: -1 + > + timestamp_ms: 1234568 +> + +`, + `name: "test_float_histogram" +help: "Test float histogram with many buckets removed to keep it manageable in size." +type: HISTOGRAM +metric: < + histogram: < + sample_count_float: 175.0 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count_float: 2.0 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count_float: 4.0 + upper_bound: -0.0003899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "59727" + > + value: -0.00039 + timestamp: < + seconds: 1625851155 + nanos: 146848499 + > + > + > + bucket: < + cumulative_count_float: 16 + upper_bound: -0.0002899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "5617" + > + value: -0.00029 + > + > + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count_float: 2.0 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_count: 1.0 + negative_count: 3.0 + negative_count: -2.0 + negative_count: -1.0 + negative_count: 1.0 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_count: 1.0 + positive_count: 2.0 + positive_count: -1.0 + positive_count: -1.0 + > + timestamp_ms: 1234568 +> + +`, + `name: "test_gauge_float_histogram" +help: "Like test_float_histogram but as gauge histogram." 
+type: GAUGE_HISTOGRAM +metric: < + histogram: < + sample_count_float: 175.0 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count_float: 2.0 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count_float: 4.0 + upper_bound: -0.0003899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "59727" + > + value: -0.00039 + timestamp: < + seconds: 1625851155 + nanos: 146848499 + > + > + > + bucket: < + cumulative_count_float: 16 + upper_bound: -0.0002899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "5617" + > + value: -0.00029 + > + > + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count_float: 2.0 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_count: 1.0 + negative_count: 3.0 + negative_count: -2.0 + negative_count: -1.0 + negative_count: 1.0 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_count: 1.0 + positive_count: 2.0 + positive_count: -1.0 + positive_count: -1.0 + > + timestamp_ms: 1234568 +> + +`, + `name: "test_histogram2" +help: "Similar histogram as before but now without sparse buckets." +type: HISTOGRAM +metric: < + histogram: < + sample_count: 175 + sample_sum: 0.000828 + bucket: < + cumulative_count: 2 + upper_bound: -0.00048 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.00038 + exemplar: < + label: < + name: "dummyID" + value: "59727" + > + value: -0.00038 + timestamp: < + seconds: 1625851153 + nanos: 146848499 + > + > + > + bucket: < + cumulative_count: 16 + upper_bound: 1 + exemplar: < + label: < + name: "dummyID" + value: "5617" + > + value: -0.000295 + > + > + schema: 0 + zero_threshold: 0 + > +> + +`, + `name: "test_histogram_family" +help: "Test histogram metric family with two very simple histograms." +type: HISTOGRAM +metric: < + label: < + name: "foo" + value: "bar" + > + histogram: < + sample_count: 5 + sample_sum: 12.1 + bucket: < + cumulative_count: 2 + upper_bound: 1.1 + > + bucket: < + cumulative_count: 3 + upper_bound: 2.2 + > + schema: 3 + positive_span: < + offset: 8 + length: 2 + > + positive_delta: 2 + positive_delta: 1 + > +> +metric: < + label: < + name: "foo" + value: "baz" + > + histogram: < + sample_count: 6 + sample_sum: 13.1 + bucket: < + cumulative_count: 1 + upper_bound: 1.1 + > + bucket: < + cumulative_count: 5 + upper_bound: 2.2 + > + schema: 3 + positive_span: < + offset: 8 + length: 2 + > + positive_delta: 1 + positive_delta: 4 + > +> + +`, + `name: "test_float_histogram_with_zerothreshold_zero" +help: "Test float histogram with a zero threshold of zero." +type: HISTOGRAM +metric: < + histogram: < + sample_count_float: 5.0 + sample_sum: 12.1 + schema: 3 + positive_span: < + offset: 8 + length: 2 + > + positive_count: 2.0 + positive_count: 3.0 + > +> + +`, + `name: "rpc_durations_seconds" +help: "RPC latency distributions." +type: SUMMARY +metric: < + label: < + name: "service" + value: "exponential" + > + summary: < + sample_count: 262 + sample_sum: 0.00025551262820703587 + quantile: < + quantile: 0.5 + value: 6.442786329648548e-07 + > + quantile: < + quantile: 0.9 + value: 1.9435742936658396e-06 + > + quantile: < + quantile: 0.99 + value: 4.0471608667037015e-06 + > + > +> +`, + `name: "without_quantiles" +help: "A summary without quantiles." 
+type: SUMMARY +metric: < + summary: < + sample_count: 42 + sample_sum: 1.234 + > +> +`, + `name: "empty_histogram" +help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram." +type: HISTOGRAM +metric: < + histogram: < + positive_span: < + offset: 0 + length: 0 + > + > +> + +`, + `name: "test_counter_with_createdtimestamp" +help: "A counter with a created timestamp." +type: COUNTER +metric: < + counter: < + value: 42 + created_timestamp: < + seconds: 1 + nanos: 1 + > + > +> + +`, + `name: "test_summary_with_createdtimestamp" +help: "A summary with a created timestamp." +type: SUMMARY +metric: < + summary: < + sample_count: 42 + sample_sum: 1.234 + created_timestamp: < + seconds: 1 + nanos: 1 + > + > +> + +`, + `name: "test_histogram_with_createdtimestamp" +help: "A histogram with a created timestamp." +type: HISTOGRAM +metric: < + histogram: < + created_timestamp: < + seconds: 1 + nanos: 1 + > + positive_span: < + offset: 0 + length: 0 + > + > +> + +`, + `name: "test_gaugehistogram_with_createdtimestamp" +help: "A gauge histogram with a created timestamp." +type: GAUGE_HISTOGRAM +metric: < + histogram: < + created_timestamp: < + seconds: 1 + nanos: 1 + > + positive_span: < + offset: 0 + length: 0 + > + > +> + +`, + `name: "test_histogram_with_native_histogram_exemplars" +help: "A histogram with native histogram exemplars." +type: HISTOGRAM +metric: < + histogram: < + sample_count: 175 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count: 2 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.0003899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "59727" + > + value: -0.00039 + timestamp: < + seconds: 1625851155 + nanos: 146848499 + > + > + > + bucket: < + cumulative_count: 16 + upper_bound: -0.0002899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "5617" + > + value: -0.00029 + > + > + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count: 2 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_delta: 1 + negative_delta: 3 + negative_delta: -2 + negative_delta: -1 + negative_delta: 1 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_delta: 1 + positive_delta: 2 + positive_delta: -1 + positive_delta: -1 + exemplars: < + label: < + name: "dummyID" + value: "59780" + > + value: -0.00039 + timestamp: < + seconds: 1625851155 + nanos: 146848499 + > + > + exemplars: < + label: < + name: "dummyID" + value: "5617" + > + value: -0.00029 + > + exemplars: < + label: < + name: "dummyID" + value: "59772" + > + value: -0.00052 + timestamp: < + seconds: 1625851160 + nanos: 156848499 + > + > + > + timestamp_ms: 1234568 +> + +`, + } + + varintBuf := make([]byte, binary.MaxVarintLen32) + buf := &bytes.Buffer{} + + for _, tmf := range testMetricFamilies { + pb := &dto.MetricFamily{} + // From text to proto message. + require.NoError(t, proto.UnmarshalText(tmf, pb)) + // From proto message to binary protobuf. + protoBuf, err := proto.Marshal(pb) + require.NoError(t, err) + + // Write first length, then binary protobuf. 
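+ // This produces exactly the varint-length-delimited framing that + // readDelimited in protobufparse.go consumes on the other end.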
+ varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf))) + buf.Write(varintBuf[:varintLength]) + buf.Write(protoBuf) + } + + return testMetricFamilies, buf +} + +func TestProtobufParse(t *testing.T) { + type parseResult struct { + lset labels.Labels + m string + t int64 + v float64 + typ model.MetricType + help string + unit string + comment string + shs *histogram.Histogram + fhs *histogram.FloatHistogram + e []exemplar.Exemplar + ct int64 + } + + _, inputBuf := createTestProtoBuf(t) + + scenarios := []struct { + name string + parser promtextparse.Parser + expected []parseResult + }{ + { + name: "ignore classic buckets of native histograms", + parser: NewProtobufParser(inputBuf.Bytes(), false, labels.NewSymbolTable()), + expected: []parseResult{ + { + m: "go_build_info", + help: "Build information about the main Go module.", + }, + { + m: "go_build_info", + typ: model.MetricTypeGauge, + }, + { + m: "go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)", + v: 1, + lset: labels.FromStrings( + "__name__", "go_build_info", + "checksum", "", + "path", "github.com/prometheus/client_golang", + "version", "(devel)", + ), + }, + { + m: "go_memstats_alloc_bytes_total", + help: "Total number of bytes allocated, even if freed.", + unit: "bytes", + }, + { + m: "go_memstats_alloc_bytes_total", + typ: model.MetricTypeCounter, + }, + { + m: "go_memstats_alloc_bytes_total", + v: 1.546544e+06, + lset: labels.FromStrings( + "__name__", "go_memstats_alloc_bytes_total", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "42"), Value: 12, HasTs: true, Ts: 1625851151233}, + }, + }, + { + m: "something_untyped", + help: "Just to test the untyped type.", + }, + { + m: "something_untyped", + typ: model.MetricTypeUnknown, + }, + { + m: "something_untyped", + t: 1234567, + v: 42, + lset: labels.FromStrings( + "__name__", "something_untyped", + ), + }, + { + m: "test_histogram", + help: "Test histogram with many buckets removed to keep it manageable in size.", + }, + { + m: "test_histogram", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram", + t: 1234568, + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { + m: "test_gauge_histogram", + help: "Like test_histogram but as gauge histogram.", + }, + { + m: "test_gauge_histogram", + typ: model.MetricTypeGaugeHistogram, + }, + { + m: "test_gauge_histogram", + t: 1234568, + shs: &histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram", + ), + e: 
[]exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { + m: "test_float_histogram", + help: "Test float histogram with many buckets removed to keep it manageable in size.", + }, + { + m: "test_float_histogram", + typ: model.MetricTypeHistogram, + }, + { + m: "test_float_histogram", + t: 1234568, + fhs: &histogram.FloatHistogram{ + Count: 175.0, + ZeroCount: 2.0, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0}, + NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0}, + }, + lset: labels.FromStrings( + "__name__", "test_float_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { + m: "test_gauge_float_histogram", + help: "Like test_float_histogram but as gauge histogram.", + }, + { + m: "test_gauge_float_histogram", + typ: model.MetricTypeGaugeHistogram, + }, + { + m: "test_gauge_float_histogram", + t: 1234568, + fhs: &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Count: 175.0, + ZeroCount: 2.0, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0}, + NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0}, + }, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { + m: "test_histogram2", + help: "Similar histogram as before but now without sparse buckets.", + }, + { + m: "test_histogram2", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram2_count", + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram2_count", + ), + }, + { + m: "test_histogram2_sum", + v: 0.000828, + lset: labels.FromStrings( + "__name__", "test_histogram2_sum", + ), + }, + { + m: "test_histogram2_bucket\xffle\xff-0.00048", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "-0.00048", + ), + }, + { + m: "test_histogram2_bucket\xffle\xff-0.00038", + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "-0.00038", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146}, + }, + }, + { + m: "test_histogram2_bucket\xffle\xff1.0", + v: 16, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "1.0", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.000295, HasTs: false}, + }, + }, + { + m: "test_histogram2_bucket\xffle\xff+Inf", + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "+Inf", + ), + }, + { + m: "test_histogram_family", + help: "Test histogram metric family with two very simple histograms.", + }, + { + m: "test_histogram_family", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram_family\xfffoo\xffbar", + shs: &histogram.Histogram{ + 
CounterResetHint: histogram.UnknownCounterReset, + Count: 5, + Sum: 12.1, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 8, Length: 2}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{2, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "foo", "bar", + ), + }, + { + m: "test_histogram_family\xfffoo\xffbaz", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 6, + Sum: 13.1, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 8, Length: 2}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{1, 4}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "foo", "baz", + ), + }, + { + m: "test_float_histogram_with_zerothreshold_zero", + help: "Test float histogram with a zero threshold of zero.", + }, + { + m: "test_float_histogram_with_zerothreshold_zero", + typ: model.MetricTypeHistogram, + }, + { + m: "test_float_histogram_with_zerothreshold_zero", + fhs: &histogram.FloatHistogram{ + Count: 5.0, + Sum: 12.1, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 8, Length: 2}, + }, + PositiveBuckets: []float64{2.0, 3.0}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "test_float_histogram_with_zerothreshold_zero", + ), + }, + { + m: "rpc_durations_seconds", + help: "RPC latency distributions.", + }, + { + m: "rpc_durations_seconds", + typ: model.MetricTypeSummary, + }, + { + m: "rpc_durations_seconds_count\xffservice\xffexponential", + v: 262, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds_count", + "service", "exponential", + ), + }, + { + m: "rpc_durations_seconds_sum\xffservice\xffexponential", + v: 0.00025551262820703587, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds_sum", + "service", "exponential", + ), + }, + { + m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.5", + v: 6.442786329648548e-07, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.5", + "service", "exponential", + ), + }, + { + m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.9", + v: 1.9435742936658396e-06, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.9", + "service", "exponential", + ), + }, + { + m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.99", + v: 4.0471608667037015e-06, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.99", + "service", "exponential", + ), + }, + { + m: "without_quantiles", + help: "A summary without quantiles.", + }, + { + m: "without_quantiles", + typ: model.MetricTypeSummary, + }, + { + m: "without_quantiles_count", + v: 42, + lset: labels.FromStrings( + "__name__", "without_quantiles_count", + ), + }, + { + m: "without_quantiles_sum", + v: 1.234, + lset: labels.FromStrings( + "__name__", "without_quantiles_sum", + ), + }, + { + m: "empty_histogram", + help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram.", + }, + { + m: "empty_histogram", + typ: model.MetricTypeHistogram, + }, + { + m: "empty_histogram", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + PositiveSpans: []histogram.Span{}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "empty_histogram", + ), + }, + { + m: "test_counter_with_createdtimestamp", + help: "A counter with a created timestamp.", + }, + { + 
m: "test_counter_with_createdtimestamp", + typ: model.MetricTypeCounter, + }, + { + m: "test_counter_with_createdtimestamp", + v: 42, + ct: 1000, + lset: labels.FromStrings( + "__name__", "test_counter_with_createdtimestamp", + ), + }, + { + m: "test_summary_with_createdtimestamp", + help: "A summary with a created timestamp.", + }, + { + m: "test_summary_with_createdtimestamp", + typ: model.MetricTypeSummary, + }, + { + m: "test_summary_with_createdtimestamp_count", + v: 42, + ct: 1000, + lset: labels.FromStrings( + "__name__", "test_summary_with_createdtimestamp_count", + ), + }, + { + m: "test_summary_with_createdtimestamp_sum", + v: 1.234, + ct: 1000, + lset: labels.FromStrings( + "__name__", "test_summary_with_createdtimestamp_sum", + ), + }, + { + m: "test_histogram_with_createdtimestamp", + help: "A histogram with a created timestamp.", + }, + { + m: "test_histogram_with_createdtimestamp", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram_with_createdtimestamp", + ct: 1000, + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + PositiveSpans: []histogram.Span{}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_with_createdtimestamp", + ), + }, + { + m: "test_gaugehistogram_with_createdtimestamp", + help: "A gauge histogram with a created timestamp.", + }, + { + m: "test_gaugehistogram_with_createdtimestamp", + typ: model.MetricTypeGaugeHistogram, + }, + { + m: "test_gaugehistogram_with_createdtimestamp", + ct: 1000, + shs: &histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + PositiveSpans: []histogram.Span{}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "test_gaugehistogram_with_createdtimestamp", + ), + }, + { + m: "test_histogram_with_native_histogram_exemplars", + help: "A histogram with native histogram exemplars.", + }, + { + m: "test_histogram_with_native_histogram_exemplars", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram_with_native_histogram_exemplars", + t: 1234568, + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + {Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156}, + }, + }, + }, + }, + { + name: "parse classic and native buckets", + parser: NewProtobufParser(inputBuf.Bytes(), true, labels.NewSymbolTable()), + expected: []parseResult{ + { // 0 + m: "go_build_info", + help: "Build information about the main Go module.", + }, + { // 1 + m: "go_build_info", + typ: model.MetricTypeGauge, + }, + { // 2 + m: "go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)", + v: 1, + lset: labels.FromStrings( + "__name__", "go_build_info", + "checksum", "", + "path", "github.com/prometheus/client_golang", + "version", "(devel)", + ), + }, + { // 3 + m: "go_memstats_alloc_bytes_total", + help: "Total number of bytes allocated, even if freed.", + }, + { 
// 4 + m: "go_memstats_alloc_bytes_total", + typ: model.MetricTypeCounter, + }, + { // 5 + m: "go_memstats_alloc_bytes_total", + v: 1.546544e+06, + lset: labels.FromStrings( + "__name__", "go_memstats_alloc_bytes_total", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "42"), Value: 12, HasTs: true, Ts: 1625851151233}, + }, + }, + { // 6 + m: "something_untyped", + help: "Just to test the untyped type.", + }, + { // 7 + m: "something_untyped", + typ: model.MetricTypeUnknown, + }, + { // 8 + m: "something_untyped", + t: 1234567, + v: 42, + lset: labels.FromStrings( + "__name__", "something_untyped", + ), + }, + { // 9 + m: "test_histogram", + help: "Test histogram with many buckets removed to keep it manageable in size.", + }, + { // 10 + m: "test_histogram", + typ: model.MetricTypeHistogram, + }, + { // 11 + m: "test_histogram", + t: 1234568, + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 12 + m: "test_histogram_count", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram_count", + ), + }, + { // 13 + m: "test_histogram_sum", + t: 1234568, + v: 0.0008280461746287094, + lset: labels.FromStrings( + "__name__", "test_histogram_sum", + ), + }, + { // 14 + m: "test_histogram_bucket\xffle\xff-0.0004899999999999998", + t: 1234568, + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram_bucket", + "le", "-0.0004899999999999998", + ), + }, + { // 15 + m: "test_histogram_bucket\xffle\xff-0.0003899999999999998", + t: 1234568, + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram_bucket", + "le", "-0.0003899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 16 + m: "test_histogram_bucket\xffle\xff-0.0002899999999999998", + t: 1234568, + v: 16, + lset: labels.FromStrings( + "__name__", "test_histogram_bucket", + "le", "-0.0002899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 17 + m: "test_histogram_bucket\xffle\xff+Inf", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram_bucket", + "le", "+Inf", + ), + }, + { // 18 + m: "test_gauge_histogram", + help: "Like test_histogram but as gauge histogram.", + }, + { // 19 + m: "test_gauge_histogram", + typ: model.MetricTypeGaugeHistogram, + }, + { // 20 + m: "test_gauge_histogram", + t: 1234568, + shs: &histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: 
labels.FromStrings( + "__name__", "test_gauge_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 21 + m: "test_gauge_histogram_count", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram_count", + ), + }, + { // 22 + m: "test_gauge_histogram_sum", + t: 1234568, + v: 0.0008280461746287094, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram_sum", + ), + }, + { // 23 + m: "test_gauge_histogram_bucket\xffle\xff-0.0004899999999999998", + t: 1234568, + v: 2, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram_bucket", + "le", "-0.0004899999999999998", + ), + }, + { // 24 + m: "test_gauge_histogram_bucket\xffle\xff-0.0003899999999999998", + t: 1234568, + v: 4, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram_bucket", + "le", "-0.0003899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 25 + m: "test_gauge_histogram_bucket\xffle\xff-0.0002899999999999998", + t: 1234568, + v: 16, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram_bucket", + "le", "-0.0002899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 26 + m: "test_gauge_histogram_bucket\xffle\xff+Inf", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram_bucket", + "le", "+Inf", + ), + }, + { // 27 + m: "test_float_histogram", + help: "Test float histogram with many buckets removed to keep it manageable in size.", + }, + { // 28 + m: "test_float_histogram", + typ: model.MetricTypeHistogram, + }, + { // 29 + m: "test_float_histogram", + t: 1234568, + fhs: &histogram.FloatHistogram{ + Count: 175.0, + ZeroCount: 2.0, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0}, + NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0}, + }, + lset: labels.FromStrings( + "__name__", "test_float_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 30 + m: "test_float_histogram_count", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_float_histogram_count", + ), + }, + { // 31 + m: "test_float_histogram_sum", + t: 1234568, + v: 0.0008280461746287094, + lset: labels.FromStrings( + "__name__", "test_float_histogram_sum", + ), + }, + { // 32 + m: "test_float_histogram_bucket\xffle\xff-0.0004899999999999998", + t: 1234568, + v: 2, + lset: labels.FromStrings( + "__name__", "test_float_histogram_bucket", + "le", "-0.0004899999999999998", + ), + }, + { // 33 + m: "test_float_histogram_bucket\xffle\xff-0.0003899999999999998", + t: 1234568, + v: 4, + lset: labels.FromStrings( + "__name__", "test_float_histogram_bucket", + "le", "-0.0003899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 34 + m: "test_float_histogram_bucket\xffle\xff-0.0002899999999999998", + t: 1234568, + v: 16, + lset: labels.FromStrings( + "__name__", 
"test_float_histogram_bucket", + "le", "-0.0002899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 35 + m: "test_float_histogram_bucket\xffle\xff+Inf", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_float_histogram_bucket", + "le", "+Inf", + ), + }, + { // 36 + m: "test_gauge_float_histogram", + help: "Like test_float_histogram but as gauge histogram.", + }, + { // 37 + m: "test_gauge_float_histogram", + typ: model.MetricTypeGaugeHistogram, + }, + { // 38 + m: "test_gauge_float_histogram", + t: 1234568, + fhs: &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Count: 175.0, + ZeroCount: 2.0, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0}, + NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0}, + }, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 39 + m: "test_gauge_float_histogram_count", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram_count", + ), + }, + { // 40 + m: "test_gauge_float_histogram_sum", + t: 1234568, + v: 0.0008280461746287094, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram_sum", + ), + }, + { // 41 + m: "test_gauge_float_histogram_bucket\xffle\xff-0.0004899999999999998", + t: 1234568, + v: 2, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram_bucket", + "le", "-0.0004899999999999998", + ), + }, + { // 42 + m: "test_gauge_float_histogram_bucket\xffle\xff-0.0003899999999999998", + t: 1234568, + v: 4, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram_bucket", + "le", "-0.0003899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 43 + m: "test_gauge_float_histogram_bucket\xffle\xff-0.0002899999999999998", + t: 1234568, + v: 16, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram_bucket", + "le", "-0.0002899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 44 + m: "test_gauge_float_histogram_bucket\xffle\xff+Inf", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram_bucket", + "le", "+Inf", + ), + }, + { // 45 + m: "test_histogram2", + help: "Similar histogram as before but now without sparse buckets.", + }, + { // 46 + m: "test_histogram2", + typ: model.MetricTypeHistogram, + }, + { // 47 + m: "test_histogram2_count", + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram2_count", + ), + }, + { // 48 + m: "test_histogram2_sum", + v: 0.000828, + lset: labels.FromStrings( + "__name__", "test_histogram2_sum", + ), + }, + { // 49 + m: "test_histogram2_bucket\xffle\xff-0.00048", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "-0.00048", + ), + }, + { // 50 + m: "test_histogram2_bucket\xffle\xff-0.00038", + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", 
"-0.00038", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146}, + }, + }, + { // 51 + m: "test_histogram2_bucket\xffle\xff1.0", + v: 16, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "1.0", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.000295, HasTs: false}, + }, + }, + { // 52 + m: "test_histogram2_bucket\xffle\xff+Inf", + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "+Inf", + ), + }, + { // 53 + m: "test_histogram_family", + help: "Test histogram metric family with two very simple histograms.", + }, + { // 54 + m: "test_histogram_family", + typ: model.MetricTypeHistogram, + }, + { // 55 + m: "test_histogram_family\xfffoo\xffbar", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 5, + Sum: 12.1, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 8, Length: 2}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{2, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "foo", "bar", + ), + }, + { // 56 + m: "test_histogram_family_count\xfffoo\xffbar", + v: 5, + lset: labels.FromStrings( + "__name__", "test_histogram_family_count", + "foo", "bar", + ), + }, + { // 57 + m: "test_histogram_family_sum\xfffoo\xffbar", + v: 12.1, + lset: labels.FromStrings( + "__name__", "test_histogram_family_sum", + "foo", "bar", + ), + }, + { // 58 + m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff1.1", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "bar", + "le", "1.1", + ), + }, + { // 59 + m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff2.2", + v: 3, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "bar", + "le", "2.2", + ), + }, + { // 60 + m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff+Inf", + v: 5, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "bar", + "le", "+Inf", + ), + }, + { // 61 + m: "test_histogram_family\xfffoo\xffbaz", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 6, + Sum: 13.1, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 8, Length: 2}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{1, 4}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "foo", "baz", + ), + }, + { // 62 + m: "test_histogram_family_count\xfffoo\xffbaz", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram_family_count", + "foo", "baz", + ), + }, + { // 63 + m: "test_histogram_family_sum\xfffoo\xffbaz", + v: 13.1, + lset: labels.FromStrings( + "__name__", "test_histogram_family_sum", + "foo", "baz", + ), + }, + { // 64 + m: "test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff1.1", + v: 1, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "baz", + "le", "1.1", + ), + }, + { // 65 + m: "test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff2.2", + v: 5, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "baz", + "le", "2.2", + ), + }, + { // 66 + m: "test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff+Inf", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "baz", + "le", "+Inf", + ), + }, + { // 67 + m: "test_float_histogram_with_zerothreshold_zero", + help: "Test float histogram with a zero 
threshold of zero.", + }, + { // 68 + m: "test_float_histogram_with_zerothreshold_zero", + typ: model.MetricTypeHistogram, + }, + { // 69 + m: "test_float_histogram_with_zerothreshold_zero", + fhs: &histogram.FloatHistogram{ + Count: 5.0, + Sum: 12.1, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 8, Length: 2}, + }, + PositiveBuckets: []float64{2.0, 3.0}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "test_float_histogram_with_zerothreshold_zero", + ), + }, + { // 70 + m: "rpc_durations_seconds", + help: "RPC latency distributions.", + }, + { // 71 + m: "rpc_durations_seconds", + typ: model.MetricTypeSummary, + }, + { // 72 + m: "rpc_durations_seconds_count\xffservice\xffexponential", + v: 262, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds_count", + "service", "exponential", + ), + }, + { // 73 + m: "rpc_durations_seconds_sum\xffservice\xffexponential", + v: 0.00025551262820703587, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds_sum", + "service", "exponential", + ), + }, + { // 74 + m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.5", + v: 6.442786329648548e-07, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.5", + "service", "exponential", + ), + }, + { // 75 + m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.9", + v: 1.9435742936658396e-06, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.9", + "service", "exponential", + ), + }, + { // 76 + m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.99", + v: 4.0471608667037015e-06, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.99", + "service", "exponential", + ), + }, + { // 77 + m: "without_quantiles", + help: "A summary without quantiles.", + }, + { // 78 + m: "without_quantiles", + typ: model.MetricTypeSummary, + }, + { // 79 + m: "without_quantiles_count", + v: 42, + lset: labels.FromStrings( + "__name__", "without_quantiles_count", + ), + }, + { // 80 + m: "without_quantiles_sum", + v: 1.234, + lset: labels.FromStrings( + "__name__", "without_quantiles_sum", + ), + }, + { // 78 + m: "empty_histogram", + help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram.", + }, + { // 79 + m: "empty_histogram", + typ: model.MetricTypeHistogram, + }, + { // 80 + m: "empty_histogram", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + PositiveSpans: []histogram.Span{}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "empty_histogram", + ), + }, + { // 81 + m: "test_counter_with_createdtimestamp", + help: "A counter with a created timestamp.", + }, + { // 82 + m: "test_counter_with_createdtimestamp", + typ: model.MetricTypeCounter, + }, + { // 83 + m: "test_counter_with_createdtimestamp", + v: 42, + ct: 1000, + lset: labels.FromStrings( + "__name__", "test_counter_with_createdtimestamp", + ), + }, + { // 84 + m: "test_summary_with_createdtimestamp", + help: "A summary with a created timestamp.", + }, + { // 85 + m: "test_summary_with_createdtimestamp", + typ: model.MetricTypeSummary, + }, + { // 86 + m: "test_summary_with_createdtimestamp_count", + v: 42, + ct: 1000, + lset: labels.FromStrings( + "__name__", "test_summary_with_createdtimestamp_count", + ), + }, + { // 87 + m: "test_summary_with_createdtimestamp_sum", + v: 1.234, + ct: 1000, + lset: 
labels.FromStrings( + "__name__", "test_summary_with_createdtimestamp_sum", + ), + }, + { // 88 + m: "test_histogram_with_createdtimestamp", + help: "A histogram with a created timestamp.", + }, + { // 89 + m: "test_histogram_with_createdtimestamp", + typ: model.MetricTypeHistogram, + }, + { // 90 + m: "test_histogram_with_createdtimestamp", + ct: 1000, + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + PositiveSpans: []histogram.Span{}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_with_createdtimestamp", + ), + }, + { // 91 + m: "test_gaugehistogram_with_createdtimestamp", + help: "A gauge histogram with a created timestamp.", + }, + { // 92 + m: "test_gaugehistogram_with_createdtimestamp", + typ: model.MetricTypeGaugeHistogram, + }, + { // 93 + m: "test_gaugehistogram_with_createdtimestamp", + ct: 1000, + shs: &histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + PositiveSpans: []histogram.Span{}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "test_gaugehistogram_with_createdtimestamp", + ), + }, + { // 94 + m: "test_histogram_with_native_histogram_exemplars", + help: "A histogram with native histogram exemplars.", + }, + { // 95 + m: "test_histogram_with_native_histogram_exemplars", + typ: model.MetricTypeHistogram, + }, + { // 96 + m: "test_histogram_with_native_histogram_exemplars", + t: 1234568, + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + {Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156}, + }, + }, + { // 97 + m: "test_histogram_with_native_histogram_exemplars_count", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars_count", + ), + }, + { // 98 + m: "test_histogram_with_native_histogram_exemplars_sum", + t: 1234568, + v: 0.0008280461746287094, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars_sum", + ), + }, + { // 99 + m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0004899999999999998", + t: 1234568, + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars_bucket", + "le", "-0.0004899999999999998", + ), + }, + { // 100 + m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0003899999999999998", + t: 1234568, + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars_bucket", + "le", "-0.0003899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 101 + m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0002899999999999998", + t: 1234568, + v: 16, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars_bucket", + "le", 
"-0.0002899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 102 + m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff+Inf", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars_bucket", + "le", "+Inf", + ), + }, + }, + }, + } + + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + var ( + i int + res labels.Labels + p = scenario.parser + exp = scenario.expected + ) + + for { + et, err := p.Next() + if errors.Is(err, io.EOF) { + break + } + require.NoError(t, err) + + switch et { + case promtextparse.EntrySeries: + m, ts, v := p.Series() + + var e exemplar.Exemplar + p.Metric(&res) + eFound := p.Exemplar(&e) + ct := p.CreatedTimestamp() + require.Equal(t, exp[i].m, string(m), "i: %d", i) + if ts != nil { + require.Equal(t, exp[i].t, *ts, "i: %d", i) + } else { + require.Equal(t, int64(0), exp[i].t, "i: %d", i) + } + require.Equal(t, exp[i].v, v, "i: %d", i) // nolint + testutil.RequireEqual(t, exp[i].lset, res, "i: %d", i) + if len(exp[i].e) == 0 { + require.False(t, eFound, "i: %d", i) + } else { + require.True(t, eFound, "i: %d", i) + testutil.RequireEqual(t, exp[i].e[0], e, "i: %d", i) + require.False(t, p.Exemplar(&e), "too many exemplars returned, i: %d", i) + } + if exp[i].ct != 0 { + require.NotNilf(t, ct, "i: %d", i) + require.Equal(t, exp[i].ct, *ct, "i: %d", i) + } else { + require.Nilf(t, ct, "i: %d", i) + } + + case promtextparse.EntryHistogram: + m, ts, shs, fhs := p.Histogram() + p.Metric(&res) + require.Equal(t, exp[i].m, string(m), "i: %d", i) + if ts != nil { + require.Equal(t, exp[i].t, *ts, "i: %d", i) + } else { + require.Equal(t, int64(0), exp[i].t, "i: %d", i) + } + testutil.RequireEqual(t, exp[i].lset, res, "i: %d", i) + require.Equal(t, exp[i].m, string(m), "i: %d", i) + if shs != nil { + require.Equal(t, exp[i].shs, shs, "i: %d", i) + } else { + require.Equal(t, exp[i].fhs, fhs, "i: %d", i) + } + j := 0 + for e := (exemplar.Exemplar{}); p.Exemplar(&e); j++ { + testutil.RequireEqual(t, exp[i].e[j], e, "i: %d", i) + e = exemplar.Exemplar{} + } + require.Len(t, exp[i].e, j, "not enough exemplars found, i: %d", i) + + case promtextparse.EntryType: + m, typ := p.Type() + require.Equal(t, exp[i].m, string(m), "i: %d", i) + require.Equal(t, exp[i].typ, typ, "i: %d", i) + + case promtextparse.EntryHelp: + m, h := p.Help() + require.Equal(t, exp[i].m, string(m), "i: %d", i) + require.Equal(t, exp[i].help, string(h), "i: %d", i) + + case promtextparse.EntryUnit: + m, u := p.Unit() + require.Equal(t, exp[i].m, string(m), "i: %d", i) + require.Equal(t, exp[i].unit, string(u), "i: %d", i) + + case promtextparse.EntryComment: + require.Equal(t, exp[i].comment, string(p.Comment()), "i: %d", i) + case promtextparse.EntryInvalid: + require.Fail(t, "unexpected invalid entry") + } + + i++ + } + require.Len(t, exp, i) + }) + } +} diff --git a/pkg/promotel/internal/prometheusreceiver/config.go b/pkg/promotel/internal/prometheusreceiver/config.go new file mode 100644 index 0000000000..026e1bc71b --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/config.go @@ -0,0 +1,144 @@ +package prometheusreceiver + +import ( + "fmt" + "os" + "sort" + "strings" + + "github.com/prometheus/client_golang/prometheus" + commonconfig "github.com/prometheus/common/config" + promconfig "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery/kubernetes" + 
"go.opentelemetry.io/collector/confmap" + "gopkg.in/yaml.v2" +) + +// Config defines configuration for Prometheus receiver. +type Config struct { + PrometheusConfig *PromConfig `mapstructure:"config"` + TrimMetricSuffixes bool `mapstructure:"trim_metric_suffixes"` + // UseStartTimeMetric enables retrieving the start time of all counter metrics + // from the process_start_time_seconds metric. This is only correct if all counters on that endpoint + // started after the process start time, and the process is the only actor exporting the metric after + // the process started. It should not be used in "exporters" which export counters that may have + // started before the process itself. Use only if you know what you are doing, as this may result + // in incorrect rate calculations. + UseStartTimeMetric bool `mapstructure:"use_start_time_metric"` + StartTimeMetricRegex string `mapstructure:"start_time_metric_regex"` + + // ReportExtraScrapeMetrics - enables reporting of additional metrics for Prometheus client like scrape_body_size_bytes + ReportExtraScrapeMetrics bool `mapstructure:"report_extra_scrape_metrics"` + + TargetAllocator any `mapstructure:"target_allocator"` + + Registry *prometheus.Registry `mapstructure:"-"` +} + +// Validate checks the receiver configuration is valid. +func (cfg *Config) Validate() error { + return nil +} + +// PromConfig is a redeclaration of promconfig.Config because we need custom unmarshaling +// as prometheus "config" uses `yaml` tags. +type PromConfig promconfig.Config + +var _ confmap.Unmarshaler = (*PromConfig)(nil) + +func (cfg *PromConfig) Unmarshal(componentParser *confmap.Conf) error { + cfgMap := componentParser.ToStringMap() + if len(cfgMap) == 0 { + return nil + } + return unmarshalYAML(cfgMap, (*promconfig.Config)(cfg)) +} + +func (cfg *PromConfig) Validate() error { + // Reject features that Prometheus supports but that the receiver doesn't support: + // See: + // * https://github.com/open-telemetry/opentelemetry-collector/issues/3863 + // * https://github.com/open-telemetry/wg-prometheus/issues/3 + unsupportedFeatures := make([]string, 0, 4) + if len(cfg.RemoteWriteConfigs) != 0 { + unsupportedFeatures = append(unsupportedFeatures, "remote_write") + } + if len(cfg.RemoteReadConfigs) != 0 { + unsupportedFeatures = append(unsupportedFeatures, "remote_read") + } + if len(cfg.RuleFiles) != 0 { + unsupportedFeatures = append(unsupportedFeatures, "rule_files") + } + if len(cfg.AlertingConfig.AlertRelabelConfigs) != 0 { + unsupportedFeatures = append(unsupportedFeatures, "alert_config.relabel_configs") + } + if len(cfg.AlertingConfig.AlertmanagerConfigs) != 0 { + unsupportedFeatures = append(unsupportedFeatures, "alert_config.alertmanagers") + } + if len(unsupportedFeatures) != 0 { + // Sort the values for deterministic error messages. 
+ sort.Strings(unsupportedFeatures) + return fmt.Errorf("unsupported features:\n\t%s", strings.Join(unsupportedFeatures, "\n\t")) + } + + scrapeConfigs, err := (*promconfig.Config)(cfg).GetScrapeConfigs() + if err != nil { + return err + } + + for _, sc := range scrapeConfigs { + if err := validateHTTPClientConfig(&sc.HTTPClientConfig); err != nil { + return err + } + + for _, c := range sc.ServiceDiscoveryConfigs { + if c, ok := c.(*kubernetes.SDConfig); ok { + if err := validateHTTPClientConfig(&c.HTTPClientConfig); err != nil { + return err + } + } + } + } + return nil +} + +func unmarshalYAML(in map[string]any, out any) error { + yamlOut, err := yaml.Marshal(in) + if err != nil { + return fmt.Errorf("prometheus receiver: failed to marshal config to yaml: %w", err) + } + + err = yaml.UnmarshalStrict(yamlOut, out) + if err != nil { + return fmt.Errorf("prometheus receiver: failed to unmarshal yaml to prometheus config object: %w", err) + } + return nil +} + +func validateHTTPClientConfig(cfg *commonconfig.HTTPClientConfig) error { + if cfg.Authorization != nil { + if err := checkFile(cfg.Authorization.CredentialsFile); err != nil { + return fmt.Errorf("error checking authorization credentials file %q: %w", cfg.Authorization.CredentialsFile, err) + } + } + return checkTLSConfig(cfg.TLSConfig) +} + +func checkFile(fn string) error { + // Nothing set, nothing to error on. + if fn == "" { + return nil + } + _, err := os.Stat(fn) + return err +} + +func checkTLSConfig(tlsConfig commonconfig.TLSConfig) error { + if err := checkFile(tlsConfig.CertFile); err != nil { + return fmt.Errorf("error checking client cert file %q: %w", tlsConfig.CertFile, err) + } + if err := checkFile(tlsConfig.KeyFile); err != nil { + return fmt.Errorf("error checking client key file %q: %w", tlsConfig.KeyFile, err) + } + return nil +} diff --git a/pkg/promotel/internal/prometheusreceiver/config_test.go b/pkg/promotel/internal/prometheusreceiver/config_test.go new file mode 100644 index 0000000000..99f61ba6f7 --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/config_test.go @@ -0,0 +1,78 @@ +package prometheusreceiver + +import ( + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap/confmaptest" + + "github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheusreceiver/internal/metadata" +) + +func TestLoadConfig(t *testing.T) { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) + require.NoError(t, err) + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(component.NewIDWithName(metadata.Type, "").String()) + require.NoError(t, err) + require.NoError(t, sub.Unmarshal(cfg)) + + r0 := cfg.(*Config) + assert.Equal(t, r0, factory.CreateDefaultConfig()) + + sub, err = cm.Sub(component.NewIDWithName(metadata.Type, "customname").String()) + require.NoError(t, err) + require.NoError(t, sub.Unmarshal(cfg)) + + r1 := cfg.(*Config) + assert.Equal(t, "demo", r1.PrometheusConfig.ScrapeConfigs[0].JobName) + assert.Equal(t, 5*time.Second, time.Duration(r1.PrometheusConfig.ScrapeConfigs[0].ScrapeInterval)) + assert.True(t, r1.UseStartTimeMetric) + assert.True(t, r1.TrimMetricSuffixes) + assert.Equal(t, "^(.+_)*process_start_time_seconds$", r1.StartTimeMetricRegex) + assert.True(t, r1.ReportExtraScrapeMetrics) +} + +func TestValidateConfigWithScrapeConfigFiles(t *testing.T) { + cm, err := 
confmaptest.LoadConf(filepath.Join("testdata", "config_scrape_config_files.yaml")) + require.NoError(t, err) + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(component.NewIDWithName(metadata.Type, "").String()) + require.NoError(t, err) + require.NoError(t, sub.Unmarshal(cfg)) + + require.NoError(t, component.ValidateConfig(cfg)) +} + +func TestLoadConfigFailsOnUnknownSection(t *testing.T) { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "invalid-config-section.yaml")) + require.NoError(t, err) + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(component.NewIDWithName(metadata.Type, "").String()) + require.NoError(t, err) + require.Error(t, sub.Unmarshal(cfg)) +} + +// As one of the config parameters is consuming prometheus +// configuration as a subkey, ensure that invalid configuration +// within the subkey will also raise an error. +func TestLoadConfigFailsOnUnknownPrometheusSection(t *testing.T) { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "invalid-config-prometheus-section.yaml")) + require.NoError(t, err) + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(component.NewIDWithName(metadata.Type, "").String()) + require.NoError(t, err) + require.Error(t, sub.Unmarshal(cfg)) +} diff --git a/pkg/promotel/internal/prometheusreceiver/factory.go b/pkg/promotel/internal/prometheusreceiver/factory.go new file mode 100644 index 0000000000..1a020364af --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/factory.go @@ -0,0 +1,65 @@ +package prometheusreceiver + +import ( + "context" + + "github.com/prometheus/client_golang/prometheus" + promconfig "github.com/prometheus/prometheus/config" + _ "github.com/prometheus/prometheus/discovery/install" // init() of this package registers service discovery impl. + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/receiver" + "go.uber.org/zap" + + "github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheusreceiver/internal/metadata" +) + +// NewFactory creates a new Prometheus receiver factory. 
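+//
+// A minimal wiring sketch (receivertest/consumertest are the OTel test helpers
+// used by this package's tests; production code supplies real settings and a
+// metrics consumer):
+//
+//	factory := NewFactory()
+//	cfg := factory.CreateDefaultConfig()
+//	rcvr, err := factory.CreateMetrics(ctx, receivertest.NewNopSettings(), cfg, consumertest.NewNop())
+//	// rcvr.Start(ctx, host) begins collection; rcvr.Shutdown(ctx) stops it.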
+func NewFactory() receiver.Factory {
+	return receiver.NewFactory(
+		metadata.Type,
+		createDefaultConfig,
+		receiver.WithMetrics(createMetricsReceiver, metadata.MetricsStability))
+}
+
+func NewFactoryWithRegistry(reg *prometheus.Registry) receiver.Factory {
+	return receiver.NewFactory(
+		metadata.Type,
+		createDefaultConfigWithRegistry(reg),
+		receiver.WithMetrics(createMetricsReceiver, metadata.MetricsStability))
+}
+
+func createDefaultConfig() component.Config {
+	return &Config{
+		PrometheusConfig: &PromConfig{
+			GlobalConfig: promconfig.DefaultGlobalConfig,
+		},
+	}
+}
+func createDefaultConfigWithRegistry(reg *prometheus.Registry) func() component.Config {
+	return func() component.Config {
+		c := createDefaultConfig().(*Config)
+		c.Registry = reg
+		return c
+	}
+}
+
+func createMetricsReceiver(
+	_ context.Context,
+	set receiver.Settings,
+	cfg component.Config,
+	nextConsumer consumer.Metrics,
+) (receiver.Metrics, error) {
+	configWarnings(set.Logger, cfg.(*Config))
+	return newPrometheusReceiver(set, cfg.(*Config), nextConsumer), nil
+}
+
+func configWarnings(logger *zap.Logger, cfg *Config) {
+	for _, sc := range cfg.PrometheusConfig.ScrapeConfigs {
+		for _, rc := range sc.MetricRelabelConfigs {
+			if rc.TargetLabel == "__name__" {
+				logger.Warn("metric renaming using metric_relabel_configs will result in unknown-typed metrics without a unit or description", zap.String("job", sc.JobName))
+			}
+		}
+	}
+}
diff --git a/pkg/promotel/internal/prometheusreceiver/factory_test.go b/pkg/promotel/internal/prometheusreceiver/factory_test.go
new file mode 100644
index 0000000000..8f72ab21c1
--- /dev/null
+++ b/pkg/promotel/internal/prometheusreceiver/factory_test.go
@@ -0,0 +1,61 @@
+package prometheusreceiver
+
+import (
+	"context"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/component/componenttest"
+	"go.opentelemetry.io/collector/confmap/confmaptest"
+	"go.opentelemetry.io/collector/consumer/consumertest"
+	"go.opentelemetry.io/collector/receiver/receivertest"
+
+	"github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheusreceiver/internal/metadata"
+)
+
+func TestCreateDefaultConfig(t *testing.T) {
+	cfg := createDefaultConfig()
+	assert.NotNil(t, cfg, "failed to create default config")
+	require.NoError(t, componenttest.CheckConfigStruct(cfg))
+}
+
+func TestCreateReceiver(t *testing.T) {
+	cfg := createDefaultConfig()
+
+	// The default config does not provide scrape_config, but receiver creation
+	// still succeeds and falls back to the default global config, as asserted below.
+ creationSet := receivertest.NewNopSettings() + mReceiver, _ := createMetricsReceiver(context.Background(), creationSet, cfg, consumertest.NewNop()) + assert.NotNil(t, mReceiver) + assert.NotNil(t, mReceiver.(*pReceiver).cfg.PrometheusConfig.GlobalConfig) +} + +func TestFactoryCanParseServiceDiscoveryConfigs(t *testing.T) { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config_sd.yaml")) + require.NoError(t, err) + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(component.NewIDWithName(metadata.Type, "").String()) + require.NoError(t, err) + require.NoError(t, sub.Unmarshal(cfg)) +} + +func TestMultipleCreate(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + set := receivertest.NewNopSettings() + firstRcvr, err := factory.CreateMetrics(context.Background(), set, cfg, consumertest.NewNop()) + require.NoError(t, err) + host := componenttest.NewNopHost() + require.NoError(t, err) + require.NoError(t, firstRcvr.Start(context.Background(), host)) + require.NoError(t, firstRcvr.Shutdown(context.Background())) + secondRcvr, err := factory.CreateMetrics(context.Background(), set, cfg, consumertest.NewNop()) + require.NoError(t, err) + require.NoError(t, secondRcvr.Start(context.Background(), host)) + require.NoError(t, secondRcvr.Shutdown(context.Background())) +} diff --git a/pkg/promotel/internal/prometheusreceiver/internal/appendable.go b/pkg/promotel/internal/prometheusreceiver/internal/appendable.go new file mode 100644 index 0000000000..adc1f306fa --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/internal/appendable.go @@ -0,0 +1,68 @@ +package internal + +import ( + "context" + "regexp" + "time" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/receiverhelper" +) + +// appendable translates Prometheus scraping diffs into OpenTelemetry format. +type appendable struct { + sink consumer.Metrics + metricAdjuster MetricsAdjuster + useStartTimeMetric bool + enableNativeHistograms bool + trimSuffixes bool + startTimeMetricRegex *regexp.Regexp + externalLabels labels.Labels + + settings receiver.Settings + obsrecv *receiverhelper.ObsReport +} + +// NewAppendable returns a storage.Appendable instance that emits metrics to the sink. 
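+//
+// The Prometheus scrape manager asks the returned value for one Appender per
+// scrape. Each appender is a transaction that accumulates the scraped samples
+// and converts them to pdata metrics for the sink when the scrape commits
+// (a summary of the vendored upstream prometheusreceiver flow).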
+func NewAppendable( + sink consumer.Metrics, + set receiver.Settings, + gcInterval time.Duration, + useStartTimeMetric bool, + startTimeMetricRegex *regexp.Regexp, + useCreatedMetric bool, + enableNativeHistograms bool, + externalLabels labels.Labels, + trimSuffixes bool, +) (storage.Appendable, error) { + var metricAdjuster MetricsAdjuster + if !useStartTimeMetric { + metricAdjuster = NewInitialPointAdjuster(set.Logger, gcInterval, useCreatedMetric) + } else { + metricAdjuster = NewStartTimeMetricAdjuster(set.Logger, startTimeMetricRegex) + } + + obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ReceiverID: set.ID, Transport: transport, ReceiverCreateSettings: set}) + if err != nil { + return nil, err + } + + return &appendable{ + sink: sink, + settings: set, + metricAdjuster: metricAdjuster, + useStartTimeMetric: useStartTimeMetric, + enableNativeHistograms: enableNativeHistograms, + startTimeMetricRegex: startTimeMetricRegex, + externalLabels: externalLabels, + obsrecv: obsrecv, + trimSuffixes: trimSuffixes, + }, nil +} + +func (o *appendable) Appender(ctx context.Context) storage.Appender { + return newTransaction(ctx, o.metricAdjuster, o.sink, o.externalLabels, o.settings, o.obsrecv, o.trimSuffixes, o.enableNativeHistograms) +} diff --git a/pkg/promotel/internal/prometheusreceiver/internal/logger.go b/pkg/promotel/internal/prometheusreceiver/internal/logger.go new file mode 100644 index 0000000000..4a25bb06c3 --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/internal/logger.go @@ -0,0 +1,139 @@ +package internal + +import ( + gokitLog "github.com/go-kit/log" + "github.com/go-kit/log/level" + "go.uber.org/zap" +) + +const ( + levelKey = "level" + msgKey = "msg" + errKey = "err" +) + +// NewZapToGokitLogAdapter create an adapter for zap.Logger to gokitLog.Logger +func NewZapToGokitLogAdapter(logger *zap.Logger) gokitLog.Logger { + // need to skip two levels in order to get the correct caller + // one for this method, the other for gokitLog + logger = logger.WithOptions(zap.AddCallerSkip(2)) + return &zapToGokitLogAdapter{l: logger.Sugar()} +} + +type zapToGokitLogAdapter struct { + l *zap.SugaredLogger +} + +type logData struct { + level level.Value + msg string + otherFields []any +} + +func (w *zapToGokitLogAdapter) Log(keyvals ...any) error { + // expecting key value pairs, the number of items need to be even + if len(keyvals)%2 == 0 { + // Extract log level and message and log them using corresponding zap function + ld := extractLogData(keyvals) + logFunc := levelToFunc(w.l, ld.level) + logFunc(ld.msg, ld.otherFields...) + } else { + // in case something goes wrong + w.l.Info(keyvals...) 
+ } + return nil +} + +func extractLogData(keyvals []any) logData { + ld := logData{ + level: level.InfoValue(), // default + } + + for i := 0; i < len(keyvals); i += 2 { + key := keyvals[i] + val := keyvals[i+1] + + if l, ok := matchLogLevel(key, val); ok { + ld.level = l + continue + } + + if m, ok := matchLogMessage(key, val); ok { + ld.msg = m + continue + } + + if err, ok := matchError(key, val); ok { + ld.otherFields = append(ld.otherFields, zap.Error(err)) + continue + } + + ld.otherFields = append(ld.otherFields, key, val) + } + + return ld +} + +// check if a given key-value pair represents go-kit log message and return it +func matchLogMessage(key any, val any) (string, bool) { + if strKey, ok := key.(string); !ok || strKey != msgKey { + return "", false + } + + msg, ok := val.(string) + if !ok { + return "", false + } + return msg, true +} + +// check if a given key-value pair represents go-kit log level and return it +func matchLogLevel(key any, val any) (level.Value, bool) { + strKey, ok := key.(string) + if !ok || strKey != levelKey { + return nil, false + } + + levelVal, ok := val.(level.Value) + if !ok { + return nil, false + } + return levelVal, true +} + +//revive:disable:error-return + +// check if a given key-value pair represents an error and return it +func matchError(key any, val any) (error, bool) { + strKey, ok := key.(string) + if !ok || strKey != errKey { + return nil, false + } + + err, ok := val.(error) + if !ok { + return nil, false + } + return err, true +} + +//revive:enable:error-return + +// find a matching zap logging function to be used for a given level +func levelToFunc(logger *zap.SugaredLogger, lvl level.Value) func(string, ...any) { + switch lvl { + case level.DebugValue(): + return logger.Debugw + case level.InfoValue(): + return logger.Infow + case level.WarnValue(): + return logger.Warnw + case level.ErrorValue(): + return logger.Errorw + } + + // default + return logger.Infow +} + +var _ gokitLog.Logger = (*zapToGokitLogAdapter)(nil) diff --git a/pkg/promotel/internal/prometheusreceiver/internal/logger_test.go b/pkg/promotel/internal/prometheusreceiver/internal/logger_test.go new file mode 100644 index 0000000000..27bc78d85a --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/internal/logger_test.go @@ -0,0 +1,287 @@ +package internal + +import ( + "fmt" + "net/http" + "testing" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest/observer" +) + +func TestLog(t *testing.T) { + tcs := []struct { + name string + input []any + wantLevel zapcore.Level + wantMessage string + }{ + { + name: "Starting provider", + input: []any{ + "level", + level.DebugValue(), + "msg", + "Starting provider", + "provider", + "string/0", + "subs", + "[target1]", + }, + wantLevel: zapcore.DebugLevel, + wantMessage: "Starting provider", + }, + { + name: "Scrape failed", + input: []any{ + "level", + level.ErrorValue(), + "scrape_pool", + "target1", + "msg", + "Scrape failed", + "err", + "server returned HTTP status 500 Internal Server Error", + }, + wantLevel: zapcore.ErrorLevel, + wantMessage: "Scrape failed", + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + conf := zap.NewProductionConfig() + conf.Level.SetLevel(zapcore.DebugLevel) + + // capture zap log entry + var entry zapcore.Entry + h := func(e zapcore.Entry) error { + entry = e + return nil + } + + logger, err := 
conf.Build(zap.Hooks(h)) + require.NoError(t, err) + + adapter := NewZapToGokitLogAdapter(logger) + err = adapter.Log(tc.input...) + require.NoError(t, err) + + assert.Equal(t, tc.wantLevel, entry.Level) + assert.Equal(t, tc.wantMessage, entry.Message) + }) + } +} + +func TestExtractLogData(t *testing.T) { + tcs := []struct { + name string + input []any + wantLevel level.Value + wantMessage string + wantOutput []any + }{ + { + name: "nil fields", + input: nil, + wantLevel: level.InfoValue(), // Default + wantMessage: "", + wantOutput: nil, + }, + { + name: "empty fields", + input: []any{}, + wantLevel: level.InfoValue(), // Default + wantMessage: "", + wantOutput: nil, + }, + { + name: "info level", + input: []any{ + "level", + level.InfoValue(), + }, + wantLevel: level.InfoValue(), + wantMessage: "", + wantOutput: nil, + }, + { + name: "warn level", + input: []any{ + "level", + level.WarnValue(), + }, + wantLevel: level.WarnValue(), + wantMessage: "", + wantOutput: nil, + }, + { + name: "error level", + input: []any{ + "level", + level.ErrorValue(), + }, + wantLevel: level.ErrorValue(), + wantMessage: "", + wantOutput: nil, + }, + { + name: "debug level + extra fields", + input: []any{ + "timestamp", + 1596604719, + "level", + level.DebugValue(), + "msg", + "http client error", + }, + wantLevel: level.DebugValue(), + wantMessage: "http client error", + wantOutput: []any{ + "timestamp", 1596604719, + }, + }, + { + name: "missing level field", + input: []any{ + "timestamp", + 1596604719, + "msg", + "http client error", + }, + wantLevel: level.InfoValue(), // Default + wantMessage: "http client error", + wantOutput: []any{ + "timestamp", 1596604719, + }, + }, + { + name: "invalid level type", + input: []any{ + "level", + "warn", // String is not recognized + }, + wantLevel: level.InfoValue(), // Default + wantOutput: []any{ + "level", "warn", // Field is preserved + }, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + ld := extractLogData(tc.input) + assert.Equal(t, tc.wantLevel, ld.level) + assert.Equal(t, tc.wantMessage, ld.msg) + assert.Equal(t, tc.wantOutput, ld.otherFields) + }) + } +} + +func TestE2E(t *testing.T) { + logger, observed := observer.New(zap.DebugLevel) + gLogger := NewZapToGokitLogAdapter(zap.New(logger)) + + const targetStr = "https://host.docker.internal:5000/prometheus" + + tcs := []struct { + name string + log func() error + wantLevel zapcore.Level + wantMessage string + wantOutput []zapcore.Field + }{ + { + name: "debug level", + log: func() error { + return level.Debug(gLogger).Log() + }, + wantLevel: zapcore.DebugLevel, + wantMessage: "", + wantOutput: []zapcore.Field{}, + }, + { + name: "info level", + log: func() error { + return level.Info(gLogger).Log() + }, + wantLevel: zapcore.InfoLevel, + wantMessage: "", + wantOutput: []zapcore.Field{}, + }, + { + name: "warn level", + log: func() error { + return level.Warn(gLogger).Log() + }, + wantLevel: zapcore.WarnLevel, + wantMessage: "", + wantOutput: []zapcore.Field{}, + }, + { + name: "error level", + log: func() error { + return level.Error(gLogger).Log() + }, + wantLevel: zapcore.ErrorLevel, + wantMessage: "", + wantOutput: []zapcore.Field{}, + }, + { + name: "logger with and msg", + log: func() error { + ngLogger := log.With(gLogger, "scrape_pool", "scrape_pool") + ngLogger = log.With(ngLogger, "target", targetStr) + return level.Debug(ngLogger).Log("msg", "http client error", "err", fmt.Errorf("%s %q: dial tcp 192.168.65.2:5000: connect: connection refused", http.MethodGet, 
targetStr)) + }, + wantLevel: zapcore.DebugLevel, + wantMessage: "http client error", + wantOutput: []zapcore.Field{ + zap.String("scrape_pool", "scrape_pool"), + zap.String("target", "https://host.docker.internal:5000/prometheus"), + zap.Error(fmt.Errorf("%s %q: dial tcp 192.168.65.2:5000: connect: connection refused", http.MethodGet, targetStr)), + }, + }, + { + name: "missing level", + log: func() error { + ngLogger := log.With(gLogger, "target", "foo") + return ngLogger.Log("msg", "http client error") + }, + wantLevel: zapcore.InfoLevel, // Default + wantMessage: "http client error", + wantOutput: []zapcore.Field{ + zap.String("target", "foo"), + }, + }, + { + name: "invalid level type", + log: func() error { + ngLogger := log.With(gLogger, "target", "foo") + return ngLogger.Log("msg", "http client error", "level", "warn") + }, + wantLevel: zapcore.InfoLevel, // Default + wantMessage: "http client error", + wantOutput: []zapcore.Field{ + zap.String("target", "foo"), + zap.String("level", "warn"), // Field is preserved + }, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + require.NoError(t, tc.log()) + entries := observed.TakeAll() + require.Len(t, entries, 1) + assert.Equal(t, tc.wantLevel, entries[0].Level) + assert.Equal(t, tc.wantMessage, entries[0].Message) + assert.Equal(t, tc.wantOutput, entries[0].Context) + }) + } +} diff --git a/pkg/promotel/internal/prometheusreceiver/internal/metadata.go b/pkg/promotel/internal/prometheusreceiver/internal/metadata.go new file mode 100644 index 0000000000..850df77955 --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/internal/metadata.go @@ -0,0 +1,65 @@ +package internal + +import ( + "github.com/prometheus/common/model" + + "github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheus/scrape" +) + +type dataPoint struct { + value float64 + boundary float64 +} + +// internalMetricMetadata allows looking up metadata for internal scrape metrics +var internalMetricMetadata = map[string]*scrape.MetricMetadata{ + scrapeUpMetricName: { + Metric: scrapeUpMetricName, + Type: model.MetricTypeGauge, + Help: "The scraping was successful", + }, + "scrape_duration_seconds": { + Metric: "scrape_duration_seconds", + Unit: "seconds", + Type: model.MetricTypeGauge, + Help: "Duration of the scrape", + }, + "scrape_samples_scraped": { + Metric: "scrape_samples_scraped", + Type: model.MetricTypeGauge, + Help: "The number of samples the target exposed", + }, + "scrape_series_added": { + Metric: "scrape_series_added", + Type: model.MetricTypeGauge, + Help: "The approximate number of new series in this scrape", + }, + "scrape_samples_post_metric_relabeling": { + Metric: "scrape_samples_post_metric_relabeling", + Type: model.MetricTypeGauge, + Help: "The number of samples remaining after metric relabeling was applied", + }, +} + +func metadataForMetric(metricName string, mc scrape.MetricMetadataStore) (*scrape.MetricMetadata, string) { + if metadata, ok := internalMetricMetadata[metricName]; ok { + return metadata, metricName + } + if metadata, ok := mc.GetMetadata(metricName); ok { + return &metadata, metricName + } + // If we didn't find metadata with the original name, + // try with suffixes trimmed, in-case it is a "merged" metric type. 
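+ // e.g. series suffixed with "_count", "_sum" or "_bucket" resolve to the metadata of their base family.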
+ normalizedName := normalizeMetricName(metricName) + if metadata, ok := mc.GetMetadata(normalizedName); ok { + if metadata.Type == model.MetricTypeCounter { + return &metadata, metricName + } + return &metadata, normalizedName + } + // Otherwise, the metric is unknown + return &scrape.MetricMetadata{ + Metric: metricName, + Type: model.MetricTypeUnknown, + }, metricName +} diff --git a/pkg/promotel/internal/prometheusreceiver/internal/metadata/generated_status.go b/pkg/promotel/internal/prometheusreceiver/internal/metadata/generated_status.go new file mode 100644 index 0000000000..ae21d33661 --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/internal/metadata/generated_status.go @@ -0,0 +1,16 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "go.opentelemetry.io/collector/component" +) + +var ( + Type = component.MustNewType("prometheus") + ScopeName = "github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheusreceiver" +) + +const ( + MetricsStability = component.StabilityLevelBeta +) diff --git a/pkg/promotel/internal/prometheusreceiver/internal/metricfamily.go b/pkg/promotel/internal/prometheusreceiver/internal/metricfamily.go new file mode 100644 index 0000000000..44ee3791ce --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/internal/metricfamily.go @@ -0,0 +1,578 @@ +package internal + +import ( + "encoding/hex" + "fmt" + "math" + "sort" + "strings" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/value" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/zap" + + "github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheus/scrape" + "github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/translator/prometheus" +) + +type metricFamily struct { + mtype pmetric.MetricType + // isMonotonic only applies to sums + isMonotonic bool + groups map[uint64]*metricGroup + name string + metadata *scrape.MetricMetadata + groupOrders []*metricGroup +} + +// metricGroup, represents a single metric of a metric family. for example a histogram metric is usually represent by +// a couple data complexValue (buckets and count/sum), a group of a metric family always share a same set of tags. 
for +// simple types like counter and gauge, each data point is a group of itself +type metricGroup struct { + mtype pmetric.MetricType + ts int64 + ls labels.Labels + count float64 + hasCount bool + sum float64 + hasSum bool + created float64 + value float64 + hValue *histogram.Histogram + fhValue *histogram.FloatHistogram + complexValue []*dataPoint + exemplars pmetric.ExemplarSlice +} + +func newMetricFamily(metricName string, mc scrape.MetricMetadataStore, logger *zap.Logger) *metricFamily { + metadata, familyName := metadataForMetric(metricName, mc) + mtype, isMonotonic := convToMetricType(metadata.Type) + if mtype == pmetric.MetricTypeEmpty { + logger.Debug(fmt.Sprintf("Unknown-typed metric : %s %+v", metricName, metadata)) + } + + return &metricFamily{ + mtype: mtype, + isMonotonic: isMonotonic, + groups: make(map[uint64]*metricGroup), + name: familyName, + metadata: metadata, + } +} + +// includesMetric returns true if the metric is part of the family +func (mf *metricFamily) includesMetric(metricName string) bool { + if mf.mtype != pmetric.MetricTypeGauge { + // If it is a merged family type, then it should match the + // family name when suffixes are trimmed. + return normalizeMetricName(metricName) == mf.name + } + // If it isn't a merged type, the metricName and family name should match + return metricName == mf.name +} + +func (mg *metricGroup) sortPoints() { + sort.Slice(mg.complexValue, func(i, j int) bool { + return mg.complexValue[i].boundary < mg.complexValue[j].boundary + }) +} + +func (mg *metricGroup) toDistributionPoint(dest pmetric.HistogramDataPointSlice) { + if !mg.hasCount { + return + } + + mg.sortPoints() + + bucketCount := len(mg.complexValue) + 1 + // if the final bucket is +Inf, we ignore it + if bucketCount > 1 && mg.complexValue[bucketCount-2].boundary == math.Inf(1) { + bucketCount-- + } + + // for OTLP the bounds won't include +inf + bounds := make([]float64, bucketCount-1) + bucketCounts := make([]uint64, bucketCount) + var adjustedCount float64 + + pointIsStale := value.IsStaleNaN(mg.sum) || value.IsStaleNaN(mg.count) + for i := 0; i < bucketCount-1; i++ { + bounds[i] = mg.complexValue[i].boundary + adjustedCount = mg.complexValue[i].value + + // Buckets still need to be sent to know to set them as stale, + // but a staleness NaN converted to uint64 would be an extremely large number. + // Setting to 0 instead. + if pointIsStale { + adjustedCount = 0 + } else if i != 0 { + adjustedCount -= mg.complexValue[i-1].value + } + bucketCounts[i] = uint64(adjustedCount) + } + + // Add the final bucket based on the total count + adjustedCount = mg.count + if pointIsStale { + adjustedCount = 0 + } else if bucketCount > 1 { + adjustedCount -= mg.complexValue[bucketCount-2].value + } + bucketCounts[bucketCount-1] = uint64(adjustedCount) + + point := dest.AppendEmpty() + + if pointIsStale { + point.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + } else { + point.SetCount(uint64(mg.count)) + if mg.hasSum { + point.SetSum(mg.sum) + } + } + + point.ExplicitBounds().FromRaw(bounds) + point.BucketCounts().FromRaw(bucketCounts) + + // The timestamp MUST be in retrieved from milliseconds and converted to nanoseconds. 
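+ // i.e. mg.ts is a millisecond timestamp; timestampFromMs converts it to a nanosecond pcommon.Timestamp.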
+ tsNanos := timestampFromMs(mg.ts) + if mg.created != 0 { + point.SetStartTimestamp(timestampFromFloat64(mg.created)) + } else { + // metrics_adjuster adjusts the startTimestamp to the initial scrape timestamp + point.SetStartTimestamp(tsNanos) + } + point.SetTimestamp(tsNanos) + populateAttributes(pmetric.MetricTypeHistogram, mg.ls, point.Attributes()) + mg.setExemplars(point.Exemplars()) +} + +// toExponentialHistogramDataPoints is based on +// https://opentelemetry.io/docs/specs/otel/compatibility/prometheus_and_openmetrics/#exponential-histograms +func (mg *metricGroup) toExponentialHistogramDataPoints(dest pmetric.ExponentialHistogramDataPointSlice) { + if !mg.hasCount { + return + } + point := dest.AppendEmpty() + point.SetTimestamp(timestampFromMs(mg.ts)) + + // We do not set Min or Max as native histograms don't have that information. + switch { + case mg.fhValue != nil: + fh := mg.fhValue + + if value.IsStaleNaN(fh.Sum) { + point.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + // The count and sum are initialized to 0, so we don't need to set them. + } else { + point.SetScale(fh.Schema) + // Input is a float native histogram. This conversion will lose + // precision,but we don't actually expect float histograms in scrape, + // since these are typically the result of operations on integer + // native histograms in the database. + point.SetCount(uint64(fh.Count)) + point.SetSum(fh.Sum) + point.SetZeroThreshold(fh.ZeroThreshold) + point.SetZeroCount(uint64(fh.ZeroCount)) + + if len(fh.PositiveSpans) > 0 { + point.Positive().SetOffset(fh.PositiveSpans[0].Offset - 1) // -1 because OTEL offset are for the lower bound, not the upper bound + convertAbsoluteBuckets(fh.PositiveSpans, fh.PositiveBuckets, point.Positive().BucketCounts()) + } + if len(fh.NegativeSpans) > 0 { + point.Negative().SetOffset(fh.NegativeSpans[0].Offset - 1) // -1 because OTEL offset are for the lower bound, not the upper bound + convertAbsoluteBuckets(fh.NegativeSpans, fh.NegativeBuckets, point.Negative().BucketCounts()) + } + } + + case mg.hValue != nil: + h := mg.hValue + + if value.IsStaleNaN(h.Sum) { + point.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + // The count and sum are initialized to 0, so we don't need to set them. + } else { + point.SetScale(h.Schema) + point.SetCount(h.Count) + point.SetSum(h.Sum) + point.SetZeroThreshold(h.ZeroThreshold) + point.SetZeroCount(h.ZeroCount) + + if len(h.PositiveSpans) > 0 { + point.Positive().SetOffset(h.PositiveSpans[0].Offset - 1) // -1 because OTEL offset are for the lower bound, not the upper bound + convertDeltaBuckets(h.PositiveSpans, h.PositiveBuckets, point.Positive().BucketCounts()) + } + if len(h.NegativeSpans) > 0 { + point.Negative().SetOffset(h.NegativeSpans[0].Offset - 1) // -1 because OTEL offset are for the lower bound, not the upper bound + convertDeltaBuckets(h.NegativeSpans, h.NegativeBuckets, point.Negative().BucketCounts()) + } + } + + default: + // This should never happen. 
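+ // (unreachable in practice: addExponentialHistogramSeries always stores either an integer
+ // or a float histogram before hasCount is set)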
+ return
+ }
+
+ tsNanos := timestampFromMs(mg.ts)
+ if mg.created != 0 {
+ point.SetStartTimestamp(timestampFromFloat64(mg.created))
+ } else {
+ // metrics_adjuster adjusts the startTimestamp to the initial scrape timestamp
+ point.SetStartTimestamp(tsNanos)
+ }
+ point.SetTimestamp(tsNanos)
+ populateAttributes(pmetric.MetricTypeHistogram, mg.ls, point.Attributes())
+ mg.setExemplars(point.Exemplars())
+}
+
+func convertDeltaBuckets(spans []histogram.Span, deltas []int64, buckets pcommon.UInt64Slice) {
+ buckets.EnsureCapacity(len(deltas))
+ bucketIdx := 0
+ bucketCount := int64(0)
+ for spanIdx, span := range spans {
+ if spanIdx > 0 {
+ for i := int32(0); i < span.Offset; i++ {
+ buckets.Append(uint64(0))
+ }
+ }
+ for i := uint32(0); i < span.Length; i++ {
+ bucketCount += deltas[bucketIdx]
+ bucketIdx++
+ buckets.Append(uint64(bucketCount)) // nolint
+ }
+ }
+}
+
+func convertAbsoluteBuckets(spans []histogram.Span, counts []float64, buckets pcommon.UInt64Slice) {
+ buckets.EnsureCapacity(len(counts))
+ bucketIdx := 0
+ for spanIdx, span := range spans {
+ if spanIdx > 0 {
+ for i := int32(0); i < span.Offset; i++ {
+ buckets.Append(uint64(0))
+ }
+ }
+ for i := uint32(0); i < span.Length; i++ {
+ buckets.Append(uint64(counts[bucketIdx]))
+ bucketIdx++
+ }
+ }
+}
+
+func (mg *metricGroup) setExemplars(exemplars pmetric.ExemplarSlice) {
+ if mg == nil {
+ return
+ }
+ if mg.exemplars.Len() > 0 {
+ mg.exemplars.MoveAndAppendTo(exemplars)
+ }
+}
+
+func (mg *metricGroup) toSummaryPoint(dest pmetric.SummaryDataPointSlice) {
+ // count is expected to be provided; however, it can be missing in the following two cases:
+ // 1. the data is corrupted
+ // 2. it was ignored by startValue evaluation
+ if !mg.hasCount {
+ return
+ }
+
+ mg.sortPoints()
+
+ point := dest.AppendEmpty()
+ pointIsStale := value.IsStaleNaN(mg.sum) || value.IsStaleNaN(mg.count)
+ if pointIsStale {
+ point.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true))
+ } else {
+ if mg.hasSum {
+ point.SetSum(mg.sum)
+ }
+ point.SetCount(uint64(mg.count))
+ }
+
+ quantileValues := point.QuantileValues()
+ for _, p := range mg.complexValue {
+ quantile := quantileValues.AppendEmpty()
+ // Quantiles still need to be sent so they can be marked as stale,
+ // but a staleness NaN converted to uint64 would be an extremely large number.
+ // By not setting the quantile value, it will default to 0.
+ if !pointIsStale {
+ quantile.SetValue(p.value)
+ }
+ quantile.SetQuantile(p.boundary)
+ }
+
+ // Based on the summary description from https://prometheus.io/docs/concepts/metric_types/#summary
+ // the quantiles are calculated over a sliding time window; however, the count is the total count of
+ // observations and the corresponding sum is a sum of all observed values, thus the sum and count are
+ // used at the global level of the metricspb.SummaryValue.
+ // The timestamp MUST be retrieved in milliseconds and converted to nanoseconds.
+ tsNanos := timestampFromMs(mg.ts)
+ point.SetTimestamp(tsNanos)
+ if mg.created != 0 {
+ point.SetStartTimestamp(timestampFromFloat64(mg.created))
+ } else {
+ // metrics_adjuster adjusts the startTimestamp to the initial scrape timestamp
+ point.SetStartTimestamp(tsNanos)
+ }
+ populateAttributes(pmetric.MetricTypeSummary, mg.ls, point.Attributes())
+}
+
+func (mg *metricGroup) toNumberDataPoint(dest pmetric.NumberDataPointSlice) {
+ tsNanos := timestampFromMs(mg.ts)
+ point := dest.AppendEmpty()
+ // gauge/undefined types have no start time.
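+ // only cumulative sums carry a start timestamp, taken from the _created sample when one was observed.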
+ if mg.mtype == pmetric.MetricTypeSum {
+ if mg.created != 0 {
+ point.SetStartTimestamp(timestampFromFloat64(mg.created))
+ } else {
+ // metrics_adjuster adjusts the startTimestamp to the initial scrape timestamp
+ point.SetStartTimestamp(tsNanos)
+ }
+ }
+ point.SetTimestamp(tsNanos)
+ if value.IsStaleNaN(mg.value) {
+ point.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true))
+ } else {
+ point.SetDoubleValue(mg.value)
+ }
+ populateAttributes(pmetric.MetricTypeGauge, mg.ls, point.Attributes())
+ mg.setExemplars(point.Exemplars())
+}
+
+func populateAttributes(mType pmetric.MetricType, ls labels.Labels, dest pcommon.Map) {
+ dest.EnsureCapacity(ls.Len())
+ names := getSortedNotUsefulLabels(mType)
+ j := 0
+ ls.Range(func(l labels.Label) {
+ for j < len(names) && names[j] < l.Name {
+ j++
+ }
+ if j < len(names) && l.Name == names[j] {
+ return
+ }
+ if l.Value == "" {
+ // empty label values should be omitted
+ return
+ }
+ dest.PutStr(l.Name, l.Value)
+ })
+}
+
+func (mf *metricFamily) loadMetricGroupOrCreate(groupKey uint64, ls labels.Labels, ts int64) *metricGroup {
+ mg, ok := mf.groups[groupKey]
+ if !ok {
+ mg = &metricGroup{
+ mtype: mf.mtype,
+ ts: ts,
+ ls: ls,
+ exemplars: pmetric.NewExemplarSlice(),
+ }
+ mf.groups[groupKey] = mg
+ // maintaining data insertion order is helpful to generate stable/reproducible metric output
+ mf.groupOrders = append(mf.groupOrders, mg)
+ }
+ return mg
+}
+
+func (mf *metricFamily) addSeries(seriesRef uint64, metricName string, ls labels.Labels, t int64, v float64) error {
+ mg := mf.loadMetricGroupOrCreate(seriesRef, ls, t)
+ if mg.ts != t {
+ return fmt.Errorf("inconsistent timestamps on metric points for metric %v", metricName)
+ }
+ switch mf.mtype {
+ case pmetric.MetricTypeHistogram, pmetric.MetricTypeSummary:
+ switch {
+ case strings.HasSuffix(metricName, metricsSuffixSum):
+ mg.sum = v
+ mg.hasSum = true
+ case strings.HasSuffix(metricName, metricsSuffixCount):
+ // always use the timestamp from count, because it is the only required field for histograms and summaries.
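+ // (the sum sample is optional, so the count's timestamp becomes the group's authoritative timestamp)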
+ mg.ts = t + mg.count = v + mg.hasCount = true + case metricName == mf.metadata.Metric+metricSuffixCreated: + mg.created = v + default: + boundary, err := getBoundary(mf.mtype, ls) + if err != nil { + return err + } + mg.complexValue = append(mg.complexValue, &dataPoint{value: v, boundary: boundary}) + } + case pmetric.MetricTypeExponentialHistogram: + if metricName == mf.metadata.Metric+metricSuffixCreated { + mg.created = v + } + case pmetric.MetricTypeSum: + if metricName == mf.metadata.Metric+metricSuffixCreated { + mg.created = v + } else { + mg.value = v + } + case pmetric.MetricTypeEmpty, pmetric.MetricTypeGauge: + fallthrough + default: + mg.value = v + } + + return nil +} + +func (mf *metricFamily) addExponentialHistogramSeries(seriesRef uint64, metricName string, ls labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) error { + mg := mf.loadMetricGroupOrCreate(seriesRef, ls, t) + if mg.ts != t { + return fmt.Errorf("inconsistent timestamps on metric points for metric %v", metricName) + } + if mg.mtype != pmetric.MetricTypeExponentialHistogram { + return fmt.Errorf("metric type mismatch for exponential histogram metric %v type %s", metricName, mg.mtype.String()) + } + switch { + case fh != nil: + if mg.hValue != nil { + return fmt.Errorf("exponential histogram %v already has float counts", metricName) + } + mg.count = fh.Count + mg.sum = fh.Sum + mg.hasCount = true + mg.hasSum = true + mg.fhValue = fh + case h != nil: + if mg.fhValue != nil { + return fmt.Errorf("exponential histogram %v already has integer counts", metricName) + } + mg.count = float64(h.Count) + mg.sum = h.Sum + mg.hasCount = true + mg.hasSum = true + mg.hValue = h + } + return nil +} + +func (mf *metricFamily) appendMetric(metrics pmetric.MetricSlice, trimSuffixes bool) { + metric := pmetric.NewMetric() + // Trims type and unit suffixes from metric name + name := mf.name + if trimSuffixes { + name = prometheus.TrimPromSuffixes(name, mf.mtype, mf.metadata.Unit) + } + metric.SetName(name) + metric.SetDescription(mf.metadata.Help) + metric.SetUnit(prometheus.UnitWordToUCUM(mf.metadata.Unit)) + metric.Metadata().PutStr(prometheus.MetricMetadataTypeKey, string(mf.metadata.Type)) + + var pointCount int + + switch mf.mtype { + case pmetric.MetricTypeHistogram: + histogram := metric.SetEmptyHistogram() + histogram.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + hdpL := histogram.DataPoints() + for _, mg := range mf.groupOrders { + mg.toDistributionPoint(hdpL) + } + pointCount = hdpL.Len() + + case pmetric.MetricTypeSummary: + summary := metric.SetEmptySummary() + sdpL := summary.DataPoints() + for _, mg := range mf.groupOrders { + mg.toSummaryPoint(sdpL) + } + pointCount = sdpL.Len() + + case pmetric.MetricTypeSum: + sum := metric.SetEmptySum() + sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + sum.SetIsMonotonic(mf.isMonotonic) + sdpL := sum.DataPoints() + for _, mg := range mf.groupOrders { + mg.toNumberDataPoint(sdpL) + } + pointCount = sdpL.Len() + + case pmetric.MetricTypeExponentialHistogram: + histogram := metric.SetEmptyExponentialHistogram() + histogram.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + hdpL := histogram.DataPoints() + for _, mg := range mf.groupOrders { + mg.toExponentialHistogramDataPoints(hdpL) + } + pointCount = hdpL.Len() + + case pmetric.MetricTypeEmpty, pmetric.MetricTypeGauge: + fallthrough + default: // Everything else should be set to a Gauge. 
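+ // (this keeps unknown-typed families visible as gauges instead of dropping their samples)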
+ gauge := metric.SetEmptyGauge() + gdpL := gauge.DataPoints() + for _, mg := range mf.groupOrders { + mg.toNumberDataPoint(gdpL) + } + pointCount = gdpL.Len() + } + + if pointCount == 0 { + return + } + + metric.MoveTo(metrics.AppendEmpty()) +} + +func (mf *metricFamily) addExemplar(seriesRef uint64, e exemplar.Exemplar) { + mg := mf.groups[seriesRef] + if mg == nil { + return + } + es := mg.exemplars + convertExemplar(e, es.AppendEmpty()) +} + +func convertExemplar(pe exemplar.Exemplar, e pmetric.Exemplar) { + e.SetTimestamp(timestampFromMs(pe.Ts)) + e.SetDoubleValue(pe.Value) + e.FilteredAttributes().EnsureCapacity(pe.Labels.Len()) + pe.Labels.Range(func(lb labels.Label) { + switch strings.ToLower(lb.Name) { + case prometheus.ExemplarTraceIDKey: + var tid [16]byte + err := decodeAndCopyToLowerBytes(tid[:], []byte(lb.Value)) + if err == nil { + e.SetTraceID(tid) + } else { + e.FilteredAttributes().PutStr(lb.Name, lb.Value) + } + case prometheus.ExemplarSpanIDKey: + var sid [8]byte + err := decodeAndCopyToLowerBytes(sid[:], []byte(lb.Value)) + if err == nil { + e.SetSpanID(sid) + } else { + e.FilteredAttributes().PutStr(lb.Name, lb.Value) + } + default: + e.FilteredAttributes().PutStr(lb.Name, lb.Value) + } + }) +} + +/* + decodeAndCopyToLowerBytes copies src to dst on lower bytes instead of higher + +1. If len(src) > len(dst) -> copy first len(dst) bytes as it is. Example -> src = []byte{0xab,0xcd,0xef,0xgh,0xij}, dst = [2]byte, result dst = [2]byte{0xab, 0xcd} +2. If len(src) = len(dst) -> copy src to dst as it is +3. If len(src) < len(dst) -> prepend required 0s and then add src to dst. Example -> src = []byte{0xab, 0xcd}, dst = [8]byte, result dst = [8]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0xcd} +*/ +func decodeAndCopyToLowerBytes(dst []byte, src []byte) error { + var err error + decodedLen := hex.DecodedLen(len(src)) + if decodedLen >= len(dst) { + _, err = hex.Decode(dst, src[:hex.EncodedLen(len(dst))]) + } else { + _, err = hex.Decode(dst[len(dst)-decodedLen:], src) + } + return err +} diff --git a/pkg/promotel/internal/prometheusreceiver/internal/metricfamily_test.go b/pkg/promotel/internal/prometheusreceiver/internal/metricfamily_test.go new file mode 100644 index 0000000000..697432bfac --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/internal/metricfamily_test.go @@ -0,0 +1,904 @@ +package internal + +import ( + "math" + "testing" + "time" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/value" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/zap" + + "github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheus/scrape" +) + +type testMetadataStore map[string]scrape.MetricMetadata + +func (tmc testMetadataStore) GetMetadata(familyName string) (scrape.MetricMetadata, bool) { + lookup, ok := tmc[familyName] + return lookup, ok +} + +func (tmc testMetadataStore) ListMetadata() []scrape.MetricMetadata { return nil } + +func (tmc testMetadataStore) SizeMetadata() int { return 0 } + +func (tmc testMetadataStore) LengthMetadata() int { + return len(tmc) +} + +var mc = testMetadataStore{ + "counter": scrape.MetricMetadata{ + Metric: "cr", + Type: model.MetricTypeCounter, + Help: "This is some help for a counter", + Unit: "By", + }, + "counter_created": scrape.MetricMetadata{ + Metric: "counter", + Type: 
model.MetricTypeCounter, + Help: "This is some help for a counter", + Unit: "By", + }, + "gauge": scrape.MetricMetadata{ + Metric: "ge", + Type: model.MetricTypeGauge, + Help: "This is some help for a gauge", + Unit: "1", + }, + "gaugehistogram": scrape.MetricMetadata{ + Metric: "gh", + Type: model.MetricTypeGaugeHistogram, + Help: "This is some help for a gauge histogram", + Unit: "?", + }, + "histogram": scrape.MetricMetadata{ + Metric: "hg", + Type: model.MetricTypeHistogram, + Help: "This is some help for a histogram", + Unit: "ms", + }, + "histogram_with_created": scrape.MetricMetadata{ + Metric: "histogram_with_created", + Type: model.MetricTypeHistogram, + Help: "This is some help for a histogram", + Unit: "ms", + }, + "histogram_stale": scrape.MetricMetadata{ + Metric: "hg_stale", + Type: model.MetricTypeHistogram, + Help: "This is some help for a histogram", + Unit: "ms", + }, + "summary": scrape.MetricMetadata{ + Metric: "s", + Type: model.MetricTypeSummary, + Help: "This is some help for a summary", + Unit: "ms", + }, + "summary_with_created": scrape.MetricMetadata{ + Metric: "summary_with_created", + Type: model.MetricTypeSummary, + Help: "This is some help for a summary", + Unit: "ms", + }, + "summary_stale": scrape.MetricMetadata{ + Metric: "s_stale", + Type: model.MetricTypeSummary, + Help: "This is some help for a summary", + Unit: "ms", + }, + "unknown": scrape.MetricMetadata{ + Metric: "u", + Type: model.MetricTypeUnknown, + Help: "This is some help for an unknown metric", + Unit: "?", + }, +} + +func TestMetricGroupData_toDistributionUnitTest(t *testing.T) { + type scrape struct { + at int64 + value float64 + metric string + extraLabel labels.Label + } + tests := []struct { + name string + metricName string + labels labels.Labels + scrapes []*scrape + want func() pmetric.HistogramDataPoint + wantErr bool + intervalStartTimeMs int64 + }{ + { + name: "histogram with startTimestamp", + metricName: "histogram", + intervalStartTimeMs: 11, + labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}), + scrapes: []*scrape{ + {at: 11, value: 66, metric: "histogram_count"}, + {at: 11, value: 1004.78, metric: "histogram_sum"}, + {at: 11, value: 33, metric: "histogram_bucket", extraLabel: labels.Label{Name: "le", Value: "0.75"}}, + {at: 11, value: 55, metric: "histogram_bucket", extraLabel: labels.Label{Name: "le", Value: "2.75"}}, + {at: 11, value: 66, metric: "histogram_bucket", extraLabel: labels.Label{Name: "le", Value: "+Inf"}}, + }, + want: func() pmetric.HistogramDataPoint { + point := pmetric.NewHistogramDataPoint() + point.SetCount(66) + point.SetSum(1004.78) + point.SetTimestamp(pcommon.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.ExplicitBounds().FromRaw([]float64{0.75, 2.75}) + point.BucketCounts().FromRaw([]uint64{33, 22, 11}) + point.SetStartTimestamp(pcommon.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. 
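+ // note: the cumulative "le" bucket samples (33, 55, 66) above become per-bucket counts (33, 22, 11);
+ // the +Inf bucket is derived from the total count rather than kept as an explicit bound.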
+ attributes := point.Attributes() + attributes.PutStr("a", "A") + attributes.PutStr("b", "B") + return point + }, + }, + { + name: "histogram with startTimestamp from _created", + metricName: "histogram_with_created", + intervalStartTimeMs: 11, + labels: labels.FromMap(map[string]string{"a": "A"}), + scrapes: []*scrape{ + {at: 11, value: 66, metric: "histogram_with_created_count"}, + {at: 11, value: 1004.78, metric: "histogram_with_created_sum"}, + {at: 11, value: 600.78, metric: "histogram_with_created_created"}, + { + at: 11, + value: 33, + metric: "histogram_with_created_bucket", + extraLabel: labels.Label{Name: "le", Value: "0.75"}, + }, + { + at: 11, + value: 55, + metric: "histogram_with_created_bucket", + extraLabel: labels.Label{Name: "le", Value: "2.75"}, + }, + { + at: 11, + value: 66, + metric: "histogram_with_created_bucket", + extraLabel: labels.Label{Name: "le", Value: "+Inf"}, + }, + }, + want: func() pmetric.HistogramDataPoint { + point := pmetric.NewHistogramDataPoint() + point.SetCount(66) + point.SetSum(1004.78) + + // the time in milliseconds -> nanoseconds. + point.SetTimestamp(pcommon.Timestamp(11 * time.Millisecond)) + point.SetStartTimestamp(timestampFromFloat64(600.78)) + + point.ExplicitBounds().FromRaw([]float64{0.75, 2.75}) + point.BucketCounts().FromRaw([]uint64{33, 22, 11}) + attributes := point.Attributes() + attributes.PutStr("a", "A") + return point + }, + }, + { + name: "histogram that is stale", + metricName: "histogram_stale", + intervalStartTimeMs: 11, + labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}), + scrapes: []*scrape{ + {at: 11, value: math.Float64frombits(value.StaleNaN), metric: "histogram_stale_count"}, + {at: 11, value: math.Float64frombits(value.StaleNaN), metric: "histogram_stale_sum"}, + {at: 11, value: math.Float64frombits(value.StaleNaN), metric: "histogram_bucket", extraLabel: labels.Label{Name: "le", Value: "0.75"}}, + {at: 11, value: math.Float64frombits(value.StaleNaN), metric: "histogram_bucket", extraLabel: labels.Label{Name: "le", Value: "2.75"}}, + {at: 11, value: math.Float64frombits(value.StaleNaN), metric: "histogram_bucket", extraLabel: labels.Label{Name: "le", Value: "+Inf"}}, + }, + want: func() pmetric.HistogramDataPoint { + point := pmetric.NewHistogramDataPoint() + point.SetTimestamp(pcommon.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + point.ExplicitBounds().FromRaw([]float64{0.75, 2.75}) + point.BucketCounts().FromRaw([]uint64{0, 0, 0}) + point.SetStartTimestamp(pcommon.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. 
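+ // stale points keep their bucket layout, but the counts are zeroed and
+ // the NoRecordedValue flag marks the point as stale.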
+ attributes := point.Attributes() + attributes.PutStr("a", "A") + attributes.PutStr("b", "B") + return point + }, + }, + { + name: "histogram with inconsistent timestamps", + metricName: "histogram_inconsistent_ts", + intervalStartTimeMs: 11, + labels: labels.FromMap(map[string]string{"a": "A", "le": "0.75", "b": "B"}), + scrapes: []*scrape{ + {at: 11, value: math.Float64frombits(value.StaleNaN), metric: "histogram_stale_count"}, + {at: 12, value: math.Float64frombits(value.StaleNaN), metric: "histogram_stale_sum"}, + {at: 13, value: math.Float64frombits(value.StaleNaN), metric: "value"}, + }, + wantErr: true, + }, + { + name: "histogram without buckets", + metricName: "histogram", + intervalStartTimeMs: 11, + labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}), + scrapes: []*scrape{ + {at: 11, value: 66, metric: "histogram_count"}, + {at: 11, value: 1004.78, metric: "histogram_sum"}, + }, + want: func() pmetric.HistogramDataPoint { + point := pmetric.NewHistogramDataPoint() + point.SetCount(66) + point.SetSum(1004.78) + point.SetTimestamp(pcommon.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.SetStartTimestamp(pcommon.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.BucketCounts().FromRaw([]uint64{66}) + attributes := point.Attributes() + attributes.PutStr("a", "A") + attributes.PutStr("b", "B") + return point + }, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + mp := newMetricFamily(tt.metricName, mc, zap.NewNop()) + for i, tv := range tt.scrapes { + var lbls labels.Labels + if tv.extraLabel.Name != "" { + lbls = labels.NewBuilder(tt.labels).Set(tv.extraLabel.Name, tv.extraLabel.Value).Labels() + } else { + lbls = tt.labels.Copy() + } + sRef, _ := getSeriesRef(nil, lbls, mp.mtype) + err := mp.addSeries(sRef, tv.metric, lbls, tv.at, tv.value) + if tt.wantErr { + if i != 0 { + require.Error(t, err) + } + } else { + require.NoError(t, err) + } + } + if tt.wantErr { + // Don't check the result if we got an error + return + } + + require.Len(t, mp.groups, 1) + + sl := pmetric.NewMetricSlice() + mp.appendMetric(sl, false) + + require.Equal(t, 1, sl.Len(), "Exactly one metric expected") + metric := sl.At(0) + require.Equal(t, mc[tt.metricName].Help, metric.Description(), "Expected help metadata in metric description") + require.Equal(t, mc[tt.metricName].Unit, metric.Unit(), "Expected unit metadata in metric") + + hdpL := metric.Histogram().DataPoints() + require.Equal(t, 1, hdpL.Len(), "Exactly one point expected") + got := hdpL.At(0) + want := tt.want() + require.Equal(t, want, got, "Expected the points to be equal") + }) + } +} + +func TestMetricGroupData_toExponentialDistributionUnitTest(t *testing.T) { + type scrape struct { + at int64 + metric string + extraLabel labels.Label + + // Only one kind of value should be set. + value float64 + integerHistogram *histogram.Histogram + floatHistogram *histogram.FloatHistogram // TODO: add tests for float histograms. 
+ } + tests := []struct { + name string + metricName string + labels labels.Labels + scrapes []*scrape + want func() pmetric.ExponentialHistogramDataPoint + wantErr bool + intervalStartTimeMs int64 + }{ + { + name: "integer histogram with startTimestamp", + metricName: "request_duration_seconds", + intervalStartTimeMs: 11, + labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}), + scrapes: []*scrape{ + { + at: 11, + metric: "request_duration_seconds", + integerHistogram: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Schema: 1, + ZeroThreshold: 0.42, + ZeroCount: 1, + Count: 66, + Sum: 1004.78, + PositiveSpans: []histogram.Span{{Offset: 1, Length: 2}, {Offset: 3, Length: 1}}, + PositiveBuckets: []int64{33, -30, 26}, // Delta encoded counts: 33, 3=(33-30), 30=(3+27) -> 65 + NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}}, + NegativeBuckets: []int64{1}, // Delta encoded counts: 1 + }, + }, + }, + want: func() pmetric.ExponentialHistogramDataPoint { + point := pmetric.NewExponentialHistogramDataPoint() + point.SetCount(66) + point.SetSum(1004.78) + point.SetTimestamp(pcommon.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.SetStartTimestamp(pcommon.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.SetScale(1) + point.SetZeroThreshold(0.42) + point.SetZeroCount(1) + point.Positive().SetOffset(0) + point.Positive().BucketCounts().FromRaw([]uint64{33, 3, 0, 0, 0, 29}) + point.Negative().SetOffset(-1) + point.Negative().BucketCounts().FromRaw([]uint64{1}) + attributes := point.Attributes() + attributes.PutStr("a", "A") + attributes.PutStr("b", "B") + return point + }, + }, + { + name: "integer histogram with startTimestamp from _created", + metricName: "request_duration_seconds", + intervalStartTimeMs: 11, + labels: labels.FromMap(map[string]string{"a": "A"}), + scrapes: []*scrape{ + { + at: 11, + metric: "request_duration_seconds", + integerHistogram: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Schema: 1, + ZeroThreshold: 0.42, + ZeroCount: 1, + Count: 66, + Sum: 1004.78, + PositiveSpans: []histogram.Span{{Offset: 1, Length: 2}, {Offset: 3, Length: 1}}, + PositiveBuckets: []int64{33, -30, 26}, // Delta encoded counts: 33, 3=(33-30), 30=(3+27) -> 65 + NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}}, + NegativeBuckets: []int64{1}, // Delta encoded counts: 1 + }, + }, + { + at: 11, + metric: "request_duration_seconds_created", + value: 600.78, + }, + }, + want: func() pmetric.ExponentialHistogramDataPoint { + point := pmetric.NewExponentialHistogramDataPoint() + point.SetCount(66) + point.SetSum(1004.78) + point.SetTimestamp(pcommon.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.SetStartTimestamp(timestampFromFloat64(600.78)) // the time in milliseconds -> nanoseconds. 
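+ // the request_duration_seconds_created sample (600.78) supplies the start timestamp here.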
+ point.SetScale(1) + point.SetZeroThreshold(0.42) + point.SetZeroCount(1) + point.Positive().SetOffset(0) + point.Positive().BucketCounts().FromRaw([]uint64{33, 3, 0, 0, 0, 29}) + point.Negative().SetOffset(-1) + point.Negative().BucketCounts().FromRaw([]uint64{1}) + attributes := point.Attributes() + attributes.PutStr("a", "A") + return point + }, + }, + { + name: "integer histogram that is stale", + metricName: "request_duration_seconds", + intervalStartTimeMs: 11, + labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}), + scrapes: []*scrape{ + { + at: 11, + metric: "request_duration_seconds", + integerHistogram: &histogram.Histogram{ + Sum: math.Float64frombits(value.StaleNaN), + }, + }, + }, + want: func() pmetric.ExponentialHistogramDataPoint { + point := pmetric.NewExponentialHistogramDataPoint() + point.SetTimestamp(pcommon.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + point.SetStartTimestamp(pcommon.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + attributes := point.Attributes() + attributes.PutStr("a", "A") + attributes.PutStr("b", "B") + return point + }, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + mp := newMetricFamily(tt.metricName, mc, zap.NewNop()) + for i, tv := range tt.scrapes { + var lbls labels.Labels + if tv.extraLabel.Name != "" { + lbls = labels.NewBuilder(tt.labels).Set(tv.extraLabel.Name, tv.extraLabel.Value).Labels() + } else { + lbls = tt.labels.Copy() + } + + var err error + switch { + case tv.integerHistogram != nil: + mp.mtype = pmetric.MetricTypeExponentialHistogram + sRef, _ := getSeriesRef(nil, lbls, mp.mtype) + err = mp.addExponentialHistogramSeries(sRef, tv.metric, lbls, tv.at, tv.integerHistogram, nil) + case tv.floatHistogram != nil: + mp.mtype = pmetric.MetricTypeExponentialHistogram + sRef, _ := getSeriesRef(nil, lbls, mp.mtype) + err = mp.addExponentialHistogramSeries(sRef, tv.metric, lbls, tv.at, nil, tv.floatHistogram) + default: + sRef, _ := getSeriesRef(nil, lbls, mp.mtype) + err = mp.addSeries(sRef, tv.metric, lbls, tv.at, tv.value) + } + if tt.wantErr { + if i != 0 { + require.Error(t, err) + } + } else { + require.NoError(t, err) + } + } + if tt.wantErr { + // Don't check the result if we got an error + return + } + + require.Len(t, mp.groups, 1) + + sl := pmetric.NewMetricSlice() + mp.appendMetric(sl, false) + + require.Equal(t, 1, sl.Len(), "Exactly one metric expected") + metric := sl.At(0) + require.Equal(t, mc[tt.metricName].Help, metric.Description(), "Expected help metadata in metric description") + require.Equal(t, mc[tt.metricName].Unit, metric.Unit(), "Expected unit metadata in metric") + + hdpL := metric.ExponentialHistogram().DataPoints() + require.Equal(t, 1, hdpL.Len(), "Exactly one point expected") + got := hdpL.At(0) + want := tt.want() + require.Equal(t, want, got, "Expected the points to be equal") + }) + } +} + +func TestMetricGroupData_toSummaryUnitTest(t *testing.T) { + type scrape struct { + at int64 + value float64 + metric string + } + + type labelsScrapes struct { + labels labels.Labels + scrapes []*scrape + } + tests := []struct { + name string + labelsScrapes []*labelsScrapes + want func() pmetric.SummaryDataPoint + wantErr bool + }{ + { + name: "summary", + labelsScrapes: []*labelsScrapes{ + { + labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 10, metric: 
"summary_count"}, + {at: 14, value: 15, metric: "summary_sum"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.0", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 8, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.75", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 33.7, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.50", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 27, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.90", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 56, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.99", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 82, metric: "value"}, + }, + }, + }, + want: func() pmetric.SummaryDataPoint { + point := pmetric.NewSummaryDataPoint() + point.SetCount(10) + point.SetSum(15) + qtL := point.QuantileValues() + qn0 := qtL.AppendEmpty() + qn0.SetQuantile(0) + qn0.SetValue(8) + qn50 := qtL.AppendEmpty() + qn50.SetQuantile(.5) + qn50.SetValue(27) + qn75 := qtL.AppendEmpty() + qn75.SetQuantile(.75) + qn75.SetValue(33.7) + qn90 := qtL.AppendEmpty() + qn90.SetQuantile(.9) + qn90.SetValue(56) + qn99 := qtL.AppendEmpty() + qn99.SetQuantile(.99) + qn99.SetValue(82) + point.SetTimestamp(pcommon.Timestamp(14 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.SetStartTimestamp(pcommon.Timestamp(14 * time.Millisecond)) // the time in milliseconds -> nanoseconds + attributes := point.Attributes() + attributes.PutStr("a", "A") + attributes.PutStr("b", "B") + return point + }, + }, + { + name: "summary_with_created", + labelsScrapes: []*labelsScrapes{ + { + labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 10, metric: "summary_with_created_count"}, + {at: 14, value: 15, metric: "summary_with_created_sum"}, + {at: 14, value: 150, metric: "summary_with_created_created"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.0", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 8, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.75", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 33.7, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.50", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 27, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.90", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 56, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.99", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 82, metric: "value"}, + }, + }, + }, + want: func() pmetric.SummaryDataPoint { + point := pmetric.NewSummaryDataPoint() + point.SetCount(10) + point.SetSum(15) + qtL := point.QuantileValues() + qn0 := qtL.AppendEmpty() + qn0.SetQuantile(0) + qn0.SetValue(8) + qn50 := qtL.AppendEmpty() + qn50.SetQuantile(.5) + qn50.SetValue(27) + qn75 := qtL.AppendEmpty() + qn75.SetQuantile(.75) + qn75.SetValue(33.7) + qn90 := qtL.AppendEmpty() + qn90.SetQuantile(.9) + qn90.SetValue(56) + qn99 := qtL.AppendEmpty() + qn99.SetQuantile(.99) + qn99.SetValue(82) + + // the time in milliseconds -> nanoseconds. 
+ point.SetTimestamp(pcommon.Timestamp(14 * time.Millisecond)) + point.SetStartTimestamp(timestampFromFloat64(150)) + + attributes := point.Attributes() + attributes.PutStr("a", "A") + attributes.PutStr("b", "B") + return point + }, + }, + { + name: "summary_stale", + labelsScrapes: []*labelsScrapes{ + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.0", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 10, metric: "summary_stale_count"}, + {at: 14, value: 12, metric: "summary_stale_sum"}, + {at: 14, value: 8, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.75", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 10, metric: "summary_stale_count"}, + {at: 14, value: 1004.78, metric: "summary_stale_sum"}, + {at: 14, value: 33.7, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.50", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 10, metric: "summary_stale_count"}, + {at: 14, value: 13, metric: "summary_stale_sum"}, + {at: 14, value: 27, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.90", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 10, metric: "summary_stale_count"}, + {at: 14, value: 14, metric: "summary_stale_sum"}, + {at: 14, value: 56, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.99", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: math.Float64frombits(value.StaleNaN), metric: "summary_stale_count"}, + {at: 14, value: math.Float64frombits(value.StaleNaN), metric: "summary_stale_sum"}, + {at: 14, value: math.Float64frombits(value.StaleNaN), metric: "value"}, + }, + }, + }, + want: func() pmetric.SummaryDataPoint { + point := pmetric.NewSummaryDataPoint() + qtL := point.QuantileValues() + qn0 := qtL.AppendEmpty() + point.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + qn0.SetQuantile(0) + qn0.SetValue(0) + qn50 := qtL.AppendEmpty() + qn50.SetQuantile(.5) + qn50.SetValue(0) + qn75 := qtL.AppendEmpty() + qn75.SetQuantile(.75) + qn75.SetValue(0) + qn90 := qtL.AppendEmpty() + qn90.SetQuantile(.9) + qn90.SetValue(0) + qn99 := qtL.AppendEmpty() + qn99.SetQuantile(.99) + qn99.SetValue(0) + point.SetTimestamp(pcommon.Timestamp(14 * time.Millisecond)) // the time in milliseconds -> nanoseconds. 
+ point.SetStartTimestamp(pcommon.Timestamp(14 * time.Millisecond)) // the time in milliseconds -> nanoseconds + attributes := point.Attributes() + attributes.PutStr("a", "A") + attributes.PutStr("b", "B") + return point + }, + }, + { + name: "summary with inconsistent timestamps", + labelsScrapes: []*labelsScrapes{ + { + labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}), + scrapes: []*scrape{ + {at: 11, value: 10, metric: "summary_count"}, + {at: 14, value: 15, metric: "summary_sum"}, + }, + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + mp := newMetricFamily(tt.name, mc, zap.NewNop()) + for _, lbs := range tt.labelsScrapes { + for i, scrape := range lbs.scrapes { + lb := lbs.labels.Copy() + sRef, _ := getSeriesRef(nil, lb, mp.mtype) + err := mp.addSeries(sRef, scrape.metric, lb, scrape.at, scrape.value) + if tt.wantErr { + // The first scrape won't have an error + if i != 0 { + require.Error(t, err) + } + } else { + require.NoError(t, err) + } + } + } + if tt.wantErr { + // Don't check the result if we got an error + return + } + + require.Len(t, mp.groups, 1) + + sl := pmetric.NewMetricSlice() + mp.appendMetric(sl, false) + + require.Equal(t, 1, sl.Len(), "Exactly one metric expected") + metric := sl.At(0) + require.Equal(t, mc[tt.name].Help, metric.Description(), "Expected help metadata in metric description") + require.Equal(t, mc[tt.name].Unit, metric.Unit(), "Expected unit metadata in metric") + + sdpL := metric.Summary().DataPoints() + require.Equal(t, 1, sdpL.Len(), "Exactly one point expected") + got := sdpL.At(0) + want := tt.want() + require.Equal(t, want, got, "Expected the points to be equal") + }) + } +} + +func TestMetricGroupData_toNumberDataUnitTest(t *testing.T) { + type scrape struct { + at int64 + value float64 + metric string + } + tests := []struct { + name string + metricKind string + labels labels.Labels + scrapes []*scrape + intervalStartTimestampMs int64 + want func() pmetric.NumberDataPoint + }{ + { + metricKind: "counter", + name: "counter:: startTimestampMs from _created", + intervalStartTimestampMs: 11, + labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}), + scrapes: []*scrape{ + {at: 13, value: 33.7, metric: "value"}, + {at: 13, value: 150, metric: "value_created"}, + }, + want: func() pmetric.NumberDataPoint { + point := pmetric.NewNumberDataPoint() + point.SetDoubleValue(150) + + // the time in milliseconds -> nanoseconds. + point.SetTimestamp(pcommon.Timestamp(13 * time.Millisecond)) + point.SetStartTimestamp(pcommon.Timestamp(13 * time.Millisecond)) + + attributes := point.Attributes() + attributes.PutStr("a", "A") + attributes.PutStr("b", "B") + return point + }, + }, + { + metricKind: "counter_created", + name: "counter:: startTimestampMs from _created", + intervalStartTimestampMs: 11, + labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}), + scrapes: []*scrape{ + {at: 13, value: 33.7, metric: "counter"}, + {at: 13, value: 150, metric: "counter_created"}, + }, + want: func() pmetric.NumberDataPoint { + point := pmetric.NewNumberDataPoint() + point.SetDoubleValue(33.7) + + // the time in milliseconds -> nanoseconds. 
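+ // here the family metadata is "counter_created", so the _created sample sets the start
+ // timestamp; in the plain "counter" case above it was treated as an ordinary value sample.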
+ point.SetTimestamp(pcommon.Timestamp(13 * time.Millisecond)) + point.SetStartTimestamp(timestampFromFloat64(150)) + + attributes := point.Attributes() + attributes.PutStr("a", "A") + attributes.PutStr("b", "B") + return point + }, + }, + { + metricKind: "counter", + name: "counter:: startTimestampMs of 11", + intervalStartTimestampMs: 11, + labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}), + scrapes: []*scrape{ + {at: 13, value: 33.7, metric: "value"}, + }, + want: func() pmetric.NumberDataPoint { + point := pmetric.NewNumberDataPoint() + point.SetDoubleValue(33.7) + point.SetTimestamp(pcommon.Timestamp(13 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.SetStartTimestamp(pcommon.Timestamp(13 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + attributes := point.Attributes() + attributes.PutStr("a", "A") + attributes.PutStr("b", "B") + return point + }, + }, + { + name: "counter:: startTimestampMs of 0", + metricKind: "counter", + intervalStartTimestampMs: 0, + labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}), + scrapes: []*scrape{ + {at: 28, value: 99.9, metric: "value"}, + }, + want: func() pmetric.NumberDataPoint { + point := pmetric.NewNumberDataPoint() + point.SetDoubleValue(99.9) + point.SetTimestamp(pcommon.Timestamp(28 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.SetStartTimestamp(pcommon.Timestamp(28 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + attributes := point.Attributes() + attributes.PutStr("a", "A") + attributes.PutStr("b", "B") + return point + }, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + mp := newMetricFamily(tt.metricKind, mc, zap.NewNop()) + for _, tv := range tt.scrapes { + lb := tt.labels.Copy() + sRef, _ := getSeriesRef(nil, lb, mp.mtype) + require.NoError(t, mp.addSeries(sRef, tv.metric, lb, tv.at, tv.value)) + } + + require.Len(t, mp.groups, 1) + + sl := pmetric.NewMetricSlice() + mp.appendMetric(sl, false) + + require.Equal(t, 1, sl.Len(), "Exactly one metric expected") + metric := sl.At(0) + require.Equal(t, mc[tt.metricKind].Help, metric.Description(), "Expected help metadata in metric description") + require.Equal(t, mc[tt.metricKind].Unit, metric.Unit(), "Expected unit metadata in metric") + + ndpL := metric.Sum().DataPoints() + require.Equal(t, 1, ndpL.Len(), "Exactly one point expected") + got := ndpL.At(0) + want := tt.want() + require.Equal(t, want, got, "Expected the points to be equal") + }) + } +} diff --git a/pkg/promotel/internal/prometheusreceiver/internal/metrics_adjuster.go b/pkg/promotel/internal/prometheusreceiver/internal/metrics_adjuster.go new file mode 100644 index 0000000000..cd1328b3bc --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/internal/metrics_adjuster.go @@ -0,0 +1,489 @@ +package internal + +import ( + "errors" + "sync" + "time" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + semconv "go.opentelemetry.io/collector/semconv/v1.27.0" + "go.uber.org/zap" + + "github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheus/pdatautil" +) + +// Notes on garbage collection (gc): +// +// Job-level gc: +// The Prometheus receiver will likely execute in a long running service whose lifetime may exceed +// the lifetimes of many of the jobs that it is collecting from. 
In order to keep the JobsMap from
+// leaking memory for entries of no-longer existing jobs, the JobsMap needs to remove entries that
+// haven't been accessed for a long period of time.
+//
+// Timeseries-level gc:
+// Some jobs that the Prometheus receiver is collecting from may export timeseries based on metrics
+// from other jobs (e.g. cAdvisor). In order to keep the timeseriesMap from leaking memory for entries
+// of no-longer existing jobs, the timeseriesMap for each job needs to remove entries that haven't
+// been accessed for a long period of time.
+//
+// The gc strategy uses a standard mark-and-sweep approach - each time a timeseriesMap is accessed,
+// it is marked. Similarly, each time a timeseriesInfo is accessed, it is also marked.
+//
+// At the end of each JobsMap.get(), if the last time the JobsMap was gc'd exceeds the 'gcInterval',
+// the JobsMap is locked and any timeseriesMaps that are unmarked are removed from the JobsMap;
+// otherwise the timeseriesMap is gc'd.
+//
+// The gc for the timeseriesMap is straightforward - the map is locked and, for each timeseriesInfo
+// in the map, if it has not been marked, it is removed; otherwise it is unmarked.
+//
+// Alternative Strategies
+// 1. If the job-level gc doesn't run often enough, or runs too often, a separate goroutine can
+// be spawned at JobsMap creation time that gc's at periodic intervals. This approach potentially
+// adds more contention and latency to each scrape so the current approach is used. Note that
+// the goroutine will need to be cancelled upon Shutdown().
+// 2. If the gc of each timeseriesMap during the gc of the JobsMap causes too much contention,
+// the gc of timeseriesMaps can be moved to the end of MetricsAdjuster().AdjustMetricSlice(). This
+// approach requires adding 'lastGC' Time and (potentially) a gcInterval duration to
+// timeseriesMap so the current approach is used instead.
+
+// timeseriesInfo contains the information necessary to adjust from the initial point and to detect resets.
+type timeseriesInfo struct {
+ mark bool
+
+ number numberInfo
+ histogram histogramInfo
+ summary summaryInfo
+}
+
+type numberInfo struct {
+ startTime pcommon.Timestamp
+ previousValue float64
+}
+
+type histogramInfo struct {
+ startTime pcommon.Timestamp
+ previousCount uint64
+ previousSum float64
+}
+
+type summaryInfo struct {
+ startTime pcommon.Timestamp
+ previousCount uint64
+ previousSum float64
+}
+
+type timeseriesKey struct {
+ name string
+ attributes [16]byte
+ aggTemporality pmetric.AggregationTemporality
+}
+
+// timeseriesMap maps from a timeseries instance (metric * label values) to the timeseries info for
+// the instance.
+type timeseriesMap struct {
+ sync.RWMutex
+ // The mutex is used to protect access to the member fields. It is acquired for the entirety of
+ // AdjustMetricSlice() and also acquired by gc().
+
+ mark bool
+ tsiMap map[timeseriesKey]*timeseriesInfo
+}
+
+// Get the timeseriesInfo for the timeseries associated with the metric and label values.
+func (tsm *timeseriesMap) get(metric pmetric.Metric, kv pcommon.Map) (*timeseriesInfo, bool) {
+ // This should only be invoked by functions called (directly or indirectly) by AdjustMetricSlice().
+ // The lock protecting tsm.tsiMap is acquired there.
+ name := metric.Name()
+ key := timeseriesKey{
+ name: name,
+ attributes: getAttributesSignature(kv),
+ }
+ switch metric.Type() {
+ case pmetric.MetricTypeHistogram:
+ // There are 2 types of Histograms whose aggregation temporality needs distinguishing:
+ // * CumulativeHistogram
+ // * GaugeHistogram
+ key.aggTemporality = metric.Histogram().AggregationTemporality()
+ case pmetric.MetricTypeExponentialHistogram:
+ // There are 2 types of ExponentialHistograms whose aggregation temporality needs distinguishing:
+ // * CumulativeHistogram
+ // * GaugeHistogram
+ key.aggTemporality = metric.ExponentialHistogram().AggregationTemporality()
+ default:
+ }
+
+ tsm.mark = true
+ tsi, ok := tsm.tsiMap[key]
+ if !ok {
+ tsi = &timeseriesInfo{}
+ tsm.tsiMap[key] = tsi
+ }
+ tsi.mark = true
+ return tsi, ok
+}
+
+// Create a unique signature for the attribute values, sorted by attribute keys.
+func getAttributesSignature(m pcommon.Map) [16]byte {
+ clearedMap := pcommon.NewMap()
+ m.Range(func(k string, attrValue pcommon.Value) bool {
+ value := attrValue.Str()
+ if value != "" {
+ clearedMap.PutStr(k, value)
+ }
+ return true
+ })
+ return pdatautil.MapHash(clearedMap)
+}
+
+// Remove timeseries that have aged out.
+func (tsm *timeseriesMap) gc() {
+ tsm.Lock()
+ defer tsm.Unlock()
+ // this shouldn't happen under the current gc() strategy
+ if !tsm.mark {
+ return
+ }
+ for ts, tsi := range tsm.tsiMap {
+ if !tsi.mark {
+ delete(tsm.tsiMap, ts)
+ } else {
+ tsi.mark = false
+ }
+ }
+ tsm.mark = false
+}
+
+func newTimeseriesMap() *timeseriesMap {
+ return &timeseriesMap{mark: true, tsiMap: map[timeseriesKey]*timeseriesInfo{}}
+}
+
+// JobsMap maps from a job instance to a map of timeseries instances for the job.
+type JobsMap struct {
+ sync.RWMutex
+ // The mutex is used to protect access to the member fields. It is acquired for most of
+ // get() and also acquired by gc().
+
+ gcInterval time.Duration
+ lastGC time.Time
+ jobsMap map[string]*timeseriesMap
+}
+
+// NewJobsMap creates a new (empty) JobsMap.
+func NewJobsMap(gcInterval time.Duration) *JobsMap {
+ return &JobsMap{gcInterval: gcInterval, lastGC: time.Now(), jobsMap: make(map[string]*timeseriesMap)}
+}
+
+// Remove jobs and timeseries that have aged out.
+func (jm *JobsMap) gc() {
+ jm.Lock()
+ defer jm.Unlock()
+ // once the structure is locked, confirm that gc() is still necessary
+ if time.Since(jm.lastGC) > jm.gcInterval {
+ for sig, tsm := range jm.jobsMap {
+ tsm.RLock()
+ tsmNotMarked := !tsm.mark
+ // take a read lock here, no need to get a full lock as we have a lock on the JobsMap
+ tsm.RUnlock()
+ if tsmNotMarked {
+ delete(jm.jobsMap, sig)
+ } else {
+ // a full lock will be obtained in here, if required.
+ tsm.gc()
+ }
+ }
+ jm.lastGC = time.Now()
+ }
+}
+
+func (jm *JobsMap) maybeGC() {
+ // speculatively check if gc() is necessary, recheck once the structure is locked
+ jm.RLock()
+ defer jm.RUnlock()
+ if time.Since(jm.lastGC) > jm.gcInterval {
+ go jm.gc()
+ }
+}
+
+func (jm *JobsMap) get(job, instance string) *timeseriesMap {
+ sig := job + ":" + instance
+ // a read lock is taken here as we will not need to modify jobsMap if the target timeseriesMap is available.
+ jm.RLock()
+ tsm, ok := jm.jobsMap[sig]
+ jm.RUnlock()
+ defer jm.maybeGC()
+ if ok {
+ return tsm
+ }
+ jm.Lock()
+ defer jm.Unlock()
+ // Now that we've got an exclusive lock, check once more to ensure an entry wasn't created in the interim
+ // and then create a new timeseriesMap if required.
+ tsm2, ok2 := jm.jobsMap[sig] + if ok2 { + return tsm2 + } + tsm2 = newTimeseriesMap() + jm.jobsMap[sig] = tsm2 + return tsm2 +} + +// MetricsAdjuster adjusts the start timestamps of metrics based on previously observed points. +type MetricsAdjuster interface { + AdjustMetrics(metrics pmetric.Metrics) error +} + +// initialPointAdjuster keeps, for each metric instance, the initial point of its timeseries +// and provides AdjustMetrics, which takes a sequence of metrics and adjusts their start times based on +// the initial points. +type initialPointAdjuster struct { + jobsMap *JobsMap + logger *zap.Logger + useCreatedMetric bool +} + +// NewInitialPointAdjuster returns a new MetricsAdjuster that adjusts metrics' start times based on the initial received points. +func NewInitialPointAdjuster(logger *zap.Logger, gcInterval time.Duration, useCreatedMetric bool) MetricsAdjuster { + return &initialPointAdjuster{ + jobsMap: NewJobsMap(gcInterval), + logger: logger, + useCreatedMetric: useCreatedMetric, + } +} + +// AdjustMetrics takes a sequence of metrics and adjusts their start times based on the initial and +// previous points in the timeseriesMap. +func (a *initialPointAdjuster) AdjustMetrics(metrics pmetric.Metrics) error { + // By contract, metrics will have at least 1 data point, so there is guaranteed to be at least one ResourceMetrics. + + job, found := metrics.ResourceMetrics().At(0).Resource().Attributes().Get(semconv.AttributeServiceName) + if !found { + return errors.New("adjusting metrics without job") + } + + instance, found := metrics.ResourceMetrics().At(0).Resource().Attributes().Get(semconv.AttributeServiceInstanceID) + if !found { + return errors.New("adjusting metrics without instance") + } + tsm := a.jobsMap.get(job.Str(), instance.Str()) + + // The lock on the relevant timeseriesMap is held throughout the adjustment process to ensure that + // nothing else can modify the data used for adjustment. + tsm.Lock() + defer tsm.Unlock() + for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + rm := metrics.ResourceMetrics().At(i) + for j := 0; j < rm.ScopeMetrics().Len(); j++ { + ilm := rm.ScopeMetrics().At(j) + for k := 0; k < ilm.Metrics().Len(); k++ { + metric := ilm.Metrics().At(k) + switch dataType := metric.Type(); dataType { + case pmetric.MetricTypeGauge: + // gauges don't need to be adjusted, so no additional processing is necessary + + case pmetric.MetricTypeHistogram: + a.adjustMetricHistogram(tsm, metric) + + case pmetric.MetricTypeSummary: + a.adjustMetricSummary(tsm, metric) + + case pmetric.MetricTypeSum: + a.adjustMetricSum(tsm, metric) + + case pmetric.MetricTypeExponentialHistogram: + a.adjustMetricExponentialHistogram(tsm, metric) + + case pmetric.MetricTypeEmpty: + fallthrough + + default: + // this shouldn't happen + a.logger.Info("Adjust - skipping unexpected point", zap.String("type", dataType.String())) + } + } + } + } + return nil +} + +func (a *initialPointAdjuster) adjustMetricHistogram(tsm *timeseriesMap, current pmetric.Metric) { + histogram := current.Histogram() + if histogram.AggregationTemporality() != pmetric.AggregationTemporalityCumulative { + // Only dealing with CumulativeDistributions. + return + } + + currentPoints := histogram.DataPoints() + for i := 0; i < currentPoints.Len(); i++ { + currentDist := currentPoints.At(i) + + // start timestamp was set from _created + if a.useCreatedMetric && + !currentDist.Flags().NoRecordedValue() && + currentDist.StartTimestamp() < currentDist.Timestamp() { + continue + } + + tsi, found := tsm.get(current, currentDist.Attributes()) + if !found { + // initialize everything.
+ tsi.histogram.startTime = currentDist.StartTimestamp() + tsi.histogram.previousCount = currentDist.Count() + tsi.histogram.previousSum = currentDist.Sum() + continue + } + + if currentDist.Flags().NoRecordedValue() { + // TODO: Investigate why this does not reset. + currentDist.SetStartTimestamp(tsi.histogram.startTime) + continue + } + + if currentDist.Count() < tsi.histogram.previousCount || currentDist.Sum() < tsi.histogram.previousSum { + // reset detected; re-initialize everything. + tsi.histogram.startTime = currentDist.StartTimestamp() + tsi.histogram.previousCount = currentDist.Count() + tsi.histogram.previousSum = currentDist.Sum() + continue + } + + // Update only previous values. + tsi.histogram.previousCount = currentDist.Count() + tsi.histogram.previousSum = currentDist.Sum() + currentDist.SetStartTimestamp(tsi.histogram.startTime) + } +} + +func (a *initialPointAdjuster) adjustMetricExponentialHistogram(tsm *timeseriesMap, current pmetric.Metric) { + histogram := current.ExponentialHistogram() + if histogram.AggregationTemporality() != pmetric.AggregationTemporalityCumulative { + // Only dealing with CumulativeDistributions. + return + } + + currentPoints := histogram.DataPoints() + for i := 0; i < currentPoints.Len(); i++ { + currentDist := currentPoints.At(i) + + // start timestamp was set from _created + if a.useCreatedMetric && + !currentDist.Flags().NoRecordedValue() && + currentDist.StartTimestamp() < currentDist.Timestamp() { + continue + } + + tsi, found := tsm.get(current, currentDist.Attributes()) + if !found { + // initialize everything. + tsi.histogram.startTime = currentDist.StartTimestamp() + tsi.histogram.previousCount = currentDist.Count() + tsi.histogram.previousSum = currentDist.Sum() + continue + } + + if currentDist.Flags().NoRecordedValue() { + // TODO: Investigate why this does not reset. + currentDist.SetStartTimestamp(tsi.histogram.startTime) + continue + } + + if currentDist.Count() < tsi.histogram.previousCount || currentDist.Sum() < tsi.histogram.previousSum { + // reset detected; re-initialize everything. + tsi.histogram.startTime = currentDist.StartTimestamp() + tsi.histogram.previousCount = currentDist.Count() + tsi.histogram.previousSum = currentDist.Sum() + continue + } + + // Update only previous values. + tsi.histogram.previousCount = currentDist.Count() + tsi.histogram.previousSum = currentDist.Sum() + currentDist.SetStartTimestamp(tsi.histogram.startTime) + } +} + +func (a *initialPointAdjuster) adjustMetricSum(tsm *timeseriesMap, current pmetric.Metric) { + currentPoints := current.Sum().DataPoints() + for i := 0; i < currentPoints.Len(); i++ { + currentSum := currentPoints.At(i) + + // start timestamp was set from _created + if a.useCreatedMetric && + !currentSum.Flags().NoRecordedValue() && + currentSum.StartTimestamp() < currentSum.Timestamp() { + continue + } + + tsi, found := tsm.get(current, currentSum.Attributes()) + if !found { + // initialize everything. + tsi.number.startTime = currentSum.StartTimestamp() + tsi.number.previousValue = currentSum.DoubleValue() + continue + } + + if currentSum.Flags().NoRecordedValue() { + // TODO: Investigate why this does not reset. + currentSum.SetStartTimestamp(tsi.number.startTime) + continue + } + + if currentSum.DoubleValue() < tsi.number.previousValue { + // reset detected; re-initialize everything. + tsi.number.startTime = currentSum.StartTimestamp() + tsi.number.previousValue = currentSum.DoubleValue() + continue + } + + // Update only previous values.
+ tsi.number.previousValue = currentSum.DoubleValue() + currentSum.SetStartTimestamp(tsi.number.startTime) + } +} + +func (a *initialPointAdjuster) adjustMetricSummary(tsm *timeseriesMap, current pmetric.Metric) { + currentPoints := current.Summary().DataPoints() + + for i := 0; i < currentPoints.Len(); i++ { + currentSummary := currentPoints.At(i) + + // start timestamp was set from _created + if a.useCreatedMetric && + !currentSummary.Flags().NoRecordedValue() && + currentSummary.StartTimestamp() < currentSummary.Timestamp() { + continue + } + + tsi, found := tsm.get(current, currentSummary.Attributes()) + if !found { + // initialize everything. + tsi.summary.startTime = currentSummary.StartTimestamp() + tsi.summary.previousCount = currentSummary.Count() + tsi.summary.previousSum = currentSummary.Sum() + continue + } + + if currentSummary.Flags().NoRecordedValue() { + // TODO: Investigate why this does not reset. + currentSummary.SetStartTimestamp(tsi.summary.startTime) + continue + } + + if (currentSummary.Count() != 0 && + tsi.summary.previousCount != 0 && + currentSummary.Count() < tsi.summary.previousCount) || + (currentSummary.Sum() != 0 && + tsi.summary.previousSum != 0 && + currentSummary.Sum() < tsi.summary.previousSum) { + // reset detected; re-initialize everything. + tsi.summary.startTime = currentSummary.StartTimestamp() + tsi.summary.previousCount = currentSummary.Count() + tsi.summary.previousSum = currentSummary.Sum() + continue + } + + // Update only previous values. + tsi.summary.previousCount = currentSummary.Count() + tsi.summary.previousSum = currentSummary.Sum() + currentSummary.SetStartTimestamp(tsi.summary.startTime) + } +} diff --git a/pkg/promotel/internal/prometheusreceiver/internal/metrics_adjuster_test.go b/pkg/promotel/internal/prometheusreceiver/internal/metrics_adjuster_test.go new file mode 100644 index 0000000000..a3fb4e043a --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/internal/metrics_adjuster_test.go @@ -0,0 +1,741 @@ +package internal + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + semconv "go.opentelemetry.io/collector/semconv/v1.27.0" + "go.uber.org/zap" +) + +var ( + tUnknown = timestampFromMs(0) + t1 = timestampFromMs(1) + t2 = timestampFromMs(2) + t3 = timestampFromMs(3) + t4 = timestampFromMs(4) + t5 = timestampFromMs(5) + + bounds0 = []float64{1, 2, 4} + percent0 = []float64{10, 50, 90} + + sum1 = "sum1" + gauge1 = "gauge1" + histogram1 = "histogram1" + summary1 = "summary1" + exponentialHistogram1 = "exponentialHistogram1" + + k1v1k2v2 = []*kv{ + {"k1", "v1"}, + {"k2", "v2"}, + } + + k1v10k2v20 = []*kv{ + {"k1", "v10"}, + {"k2", "v20"}, + } + + k1v100k2v200 = []*kv{ + {"k1", "v100"}, + {"k2", "v200"}, + } + + emptyLabels []*kv + k1vEmpty = []*kv{{"k1", ""}} + k1vEmptyk2vEmptyk3vEmpty = []*kv{{"k1", ""}, {"k2", ""}, {"k3", ""}} +) + +func TestGauge(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Gauge: round 1 - gauge not adjusted", + metrics: metrics(gaugeMetric(gauge1, doublePoint(k1v1k2v2, t1, t1, 44))), + adjusted: metrics(gaugeMetric(gauge1, doublePoint(k1v1k2v2, t1, t1, 44))), + }, + { + description: "Gauge: round 2 - gauge not adjusted", + metrics: metrics(gaugeMetric(gauge1, doublePoint(k1v1k2v2, t2, t2, 66))), + adjusted: metrics(gaugeMetric(gauge1, doublePoint(k1v1k2v2, t2, t2, 66))), + }, + { + description: "Gauge: round 3 - value less than
previous value - gauge is not adjusted", + metrics: metrics(gaugeMetric(gauge1, doublePoint(k1v1k2v2, t3, t3, 55))), + adjusted: metrics(gaugeMetric(gauge1, doublePoint(k1v1k2v2, t3, t3, 55))), + }, + } + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestSum(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Sum: round 1 - initial instance, start time is established", + metrics: metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t1, t1, 44))), + adjusted: metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t1, t1, 44))), + }, + { + description: "Sum: round 2 - instance adjusted based on round 1", + metrics: metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t2, t2, 66))), + adjusted: metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t1, t2, 66))), + }, + { + description: "Sum: round 3 - instance reset (value less than previous value), start time is reset", + metrics: metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t3, t3, 55))), + adjusted: metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t3, t3, 55))), + }, + { + description: "Sum: round 4 - instance adjusted based on round 3", + metrics: metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t4, t4, 72))), + adjusted: metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t3, t4, 72))), + }, + { + description: "Sum: round 5 - instance adjusted based on round 4", + metrics: metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t5, t5, 72))), + adjusted: metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t3, t5, 72))), + }, + } + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestSummaryNoCount(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Summary No Count: round 1 - initial instance, start time is established", + metrics: metrics(summaryMetric(summary1, summaryPoint(k1v1k2v2, t1, t1, 0, 40, percent0, []float64{1, 5, 8}))), + adjusted: metrics(summaryMetric(summary1, summaryPoint(k1v1k2v2, t1, t1, 0, 40, percent0, []float64{1, 5, 8}))), + }, + { + description: "Summary No Count: round 2 - instance adjusted based on round 1", + metrics: metrics(summaryMetric(summary1, summaryPoint(k1v1k2v2, t2, t2, 0, 70, percent0, []float64{7, 44, 9}))), + adjusted: metrics(summaryMetric(summary1, summaryPoint(k1v1k2v2, t1, t2, 0, 70, percent0, []float64{7, 44, 9}))), + }, + { + description: "Summary No Count: round 3 - instance reset (sum less than previous), start time is reset", + metrics: metrics(summaryMetric(summary1, summaryPoint(k1v1k2v2, t3, t3, 0, 66, percent0, []float64{3, 22, 5}))), + adjusted: metrics(summaryMetric(summary1, summaryPoint(k1v1k2v2, t3, t3, 0, 66, percent0, []float64{3, 22, 5}))), + }, + { + description: "Summary No Count: round 4 - instance adjusted based on round 3", + metrics: metrics(summaryMetric(summary1, summaryPoint(k1v1k2v2, t4, t4, 0, 96, percent0, []float64{9, 47, 8}))), + adjusted: metrics(summaryMetric(summary1, summaryPoint(k1v1k2v2, t3, t4, 0, 96, percent0, []float64{9, 47, 8}))), + }, + } + + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestSummaryFlagNoRecordedValue(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Summary Flag NoRecordedValue: round 1 - initial instance, start time is established", + metrics: metrics(summaryMetric(summary1, summaryPoint(k1v1k2v2, t1, t1, 0, 40, percent0, []float64{1, 5, 8}))), + adjusted: metrics(summaryMetric(summary1, summaryPoint(k1v1k2v2, t1, t1, 0, 40, percent0, []float64{1, 5, 8}))), + }, + { +
description: "Summary Flag NoRecordedValue: round 2 - instance adjusted based on round 1", + metrics: metrics(summaryMetric(summary1, summaryPointNoValue(k1v1k2v2, t2, t2))), + adjusted: metrics(summaryMetric(summary1, summaryPointNoValue(k1v1k2v2, t1, t2))), + }, + } + + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestSummary(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Summary: round 1 - initial instance, start time is established", + metrics: metrics( + summaryMetric(summary1, summaryPoint(k1v1k2v2, t1, t1, 10, 40, percent0, []float64{1, 5, 8})), + ), + adjusted: metrics( + summaryMetric(summary1, summaryPoint(k1v1k2v2, t1, t1, 10, 40, percent0, []float64{1, 5, 8})), + ), + }, + { + description: "Summary: round 2 - instance adjusted based on round 1", + metrics: metrics( + summaryMetric(summary1, summaryPoint(k1v1k2v2, t2, t2, 15, 70, percent0, []float64{7, 44, 9})), + ), + adjusted: metrics( + summaryMetric(summary1, summaryPoint(k1v1k2v2, t1, t2, 15, 70, percent0, []float64{7, 44, 9})), + ), + }, + { + description: "Summary: round 3 - instance reset (count less than previous), start time is reset", + metrics: metrics( + summaryMetric(summary1, summaryPoint(k1v1k2v2, t3, t3, 12, 66, percent0, []float64{3, 22, 5})), + ), + adjusted: metrics( + summaryMetric(summary1, summaryPoint(k1v1k2v2, t3, t3, 12, 66, percent0, []float64{3, 22, 5})), + ), + }, + { + description: "Summary: round 4 - instance adjusted based on round 3", + metrics: metrics( + summaryMetric(summary1, summaryPoint(k1v1k2v2, t4, t4, 14, 96, percent0, []float64{9, 47, 8})), + ), + adjusted: metrics( + summaryMetric(summary1, summaryPoint(k1v1k2v2, t3, t4, 14, 96, percent0, []float64{9, 47, 8})), + ), + }, + } + + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestHistogram(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Histogram: round 1 - initial instance, start time is established", + metrics: metrics(histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{4, 2, 3, 7}))), + adjusted: metrics(histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{4, 2, 3, 7}))), + }, { + description: "Histogram: round 2 - instance adjusted based on round 1", + metrics: metrics(histogramMetric(histogram1, histogramPoint(k1v1k2v2, t2, t2, bounds0, []uint64{6, 3, 4, 8}))), + adjusted: metrics(histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t2, bounds0, []uint64{6, 3, 4, 8}))), + }, { + description: "Histogram: round 3 - instance reset (value less than previous value), start time is reset", + metrics: metrics(histogramMetric(histogram1, histogramPoint(k1v1k2v2, t3, t3, bounds0, []uint64{5, 3, 2, 7}))), + adjusted: metrics(histogramMetric(histogram1, histogramPoint(k1v1k2v2, t3, t3, bounds0, []uint64{5, 3, 2, 7}))), + }, { + description: "Histogram: round 4 - instance adjusted based on round 3", + metrics: metrics(histogramMetric(histogram1, histogramPoint(k1v1k2v2, t4, t4, bounds0, []uint64{7, 4, 2, 12}))), + adjusted: metrics(histogramMetric(histogram1, histogramPoint(k1v1k2v2, t3, t4, bounds0, []uint64{7, 4, 2, 12}))), + }, + } + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestHistogramFlagNoRecordedValue(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Histogram: round 1 - initial instance, start time is established", + metrics: 
metrics(histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{7, 4, 2, 12}))), + adjusted: metrics(histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{7, 4, 2, 12}))), + }, + { + description: "Histogram: round 2 - instance adjusted based on round 1", + metrics: metrics(histogramMetric(histogram1, histogramPointNoValue(k1v1k2v2, tUnknown, t2))), + adjusted: metrics(histogramMetric(histogram1, histogramPointNoValue(k1v1k2v2, t1, t2))), + }, + } + + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestHistogramFlagNoRecordedValueFirstObservation(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Histogram: round 1 - initial instance, start time is unknown", + metrics: metrics(histogramMetric(histogram1, histogramPointNoValue(k1v1k2v2, tUnknown, t1))), + adjusted: metrics(histogramMetric(histogram1, histogramPointNoValue(k1v1k2v2, tUnknown, t1))), + }, + { + description: "Histogram: round 2 - instance unchanged", + metrics: metrics(histogramMetric(histogram1, histogramPointNoValue(k1v1k2v2, tUnknown, t2))), + adjusted: metrics(histogramMetric(histogram1, histogramPointNoValue(k1v1k2v2, tUnknown, t2))), + }, + } + + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +// In TestExponentialHistogram we exclude negative buckets on purpose as they are +// not considered the main use case - response times that are most commonly +// observed are never negative. Negative buckets would make the Sum() +// non-monotonic and cause unexpected resets. +func TestExponentialHistogram(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Exponential Histogram: round 1 - initial instance, start time is established", + metrics: metrics(exponentialHistogramMetric(exponentialHistogram1, exponentialHistogramPoint(k1v1k2v2, t1, t1, 3, 1, 0, []uint64{}, -2, []uint64{4, 2, 3, 7}))), + adjusted: metrics(exponentialHistogramMetric(exponentialHistogram1, exponentialHistogramPoint(k1v1k2v2, t1, t1, 3, 1, 0, []uint64{}, -2, []uint64{4, 2, 3, 7}))), + }, { + description: "Exponential Histogram: round 2 - instance adjusted based on round 1", + metrics: metrics(exponentialHistogramMetric(exponentialHistogram1, exponentialHistogramPoint(k1v1k2v2, t2, t2, 3, 1, 0, []uint64{}, -2, []uint64{6, 2, 3, 7}))), + adjusted: metrics(exponentialHistogramMetric(exponentialHistogram1, exponentialHistogramPoint(k1v1k2v2, t1, t2, 3, 1, 0, []uint64{}, -2, []uint64{6, 2, 3, 7}))), + }, { + description: "Exponential Histogram: round 3 - instance reset (value less than previous value), start time is reset", + metrics: metrics(exponentialHistogramMetric(histogram1, exponentialHistogramPoint(k1v1k2v2, t3, t3, 3, 1, 0, []uint64{}, -2, []uint64{5, 3, 2, 7}))), + adjusted: metrics(exponentialHistogramMetric(histogram1, exponentialHistogramPoint(k1v1k2v2, t3, t3, 3, 1, 0, []uint64{}, -2, []uint64{5, 3, 2, 7}))), + }, { + description: "Exponential Histogram: round 4 - instance adjusted based on round 3", + metrics: metrics(exponentialHistogramMetric(histogram1, exponentialHistogramPoint(k1v1k2v2, t4, t4, 3, 1, 0, []uint64{}, -2, []uint64{7, 4, 2, 12}))), + adjusted: metrics(exponentialHistogramMetric(histogram1, exponentialHistogramPoint(k1v1k2v2, t3, t4, 3, 1, 0, []uint64{}, -2, []uint64{7, 4, 2, 12}))), + }, + } + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestExponentialHistogramFlagNoRecordedValue(t
*testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Histogram: round 1 - initial instance, start time is established", + metrics: metrics(exponentialHistogramMetric(histogram1, exponentialHistogramPoint(k1v1k2v2, t1, t1, 0, 2, 2, []uint64{7, 4, 2, 12}, 3, []uint64{}))), + adjusted: metrics(exponentialHistogramMetric(histogram1, exponentialHistogramPoint(k1v1k2v2, t1, t1, 0, 2, 2, []uint64{7, 4, 2, 12}, 3, []uint64{}))), + }, + { + description: "Histogram: round 2 - instance adjusted based on round 1", + metrics: metrics(exponentialHistogramMetric(histogram1, exponentialHistogramPointNoValue(k1v1k2v2, tUnknown, t2))), + adjusted: metrics(exponentialHistogramMetric(histogram1, exponentialHistogramPointNoValue(k1v1k2v2, t1, t2))), + }, + } + + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestExponentialHistogramFlagNoRecordedValueFirstObservation(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Histogram: round 1 - initial instance, start time is unknown", + metrics: metrics(exponentialHistogramMetric(histogram1, exponentialHistogramPointNoValue(k1v1k2v2, tUnknown, t1))), + adjusted: metrics(exponentialHistogramMetric(histogram1, exponentialHistogramPointNoValue(k1v1k2v2, tUnknown, t1))), + }, + { + description: "Histogram: round 2 - instance unchanged", + metrics: metrics(exponentialHistogramMetric(histogram1, exponentialHistogramPointNoValue(k1v1k2v2, tUnknown, t2))), + adjusted: metrics(exponentialHistogramMetric(histogram1, exponentialHistogramPointNoValue(k1v1k2v2, tUnknown, t2))), + }, + } + + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestSummaryFlagNoRecordedValueFirstObservation(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Summary: round 1 - initial instance, start time is unknown", + metrics: metrics(summaryMetric(summary1, summaryPointNoValue(k1v1k2v2, tUnknown, t1))), + adjusted: metrics(summaryMetric(summary1, summaryPointNoValue(k1v1k2v2, tUnknown, t1))), + }, + { + description: "Summary: round 2 - instance unchanged", + metrics: metrics(summaryMetric(summary1, summaryPointNoValue(k1v1k2v2, tUnknown, t2))), + adjusted: metrics(summaryMetric(summary1, summaryPointNoValue(k1v1k2v2, tUnknown, t2))), + }, + } + + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestGaugeFlagNoRecordedValueFirstObservation(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Gauge: round 1 - initial instance, start time is unknown", + metrics: metrics(gaugeMetric(gauge1, doublePointNoValue(k1v1k2v2, tUnknown, t1))), + adjusted: metrics(gaugeMetric(gauge1, doublePointNoValue(k1v1k2v2, tUnknown, t1))), + }, + { + description: "Gauge: round 2 - instance unchanged", + metrics: metrics(gaugeMetric(gauge1, doublePointNoValue(k1v1k2v2, tUnknown, t2))), + adjusted: metrics(gaugeMetric(gauge1, doublePointNoValue(k1v1k2v2, tUnknown, t2))), + }, + } + + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestSumFlagNoRecordedValueFirstObservation(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "Sum: round 1 - initial instance, start time is unknown", + metrics: metrics(sumMetric("sum1", doublePointNoValue(k1v1k2v2, tUnknown, t1))), + adjusted: metrics(sumMetric("sum1", doublePointNoValue(k1v1k2v2, tUnknown, t1))), + }, + { + description: "Sum: round 2 - instance unchanged", + metrics: 
metrics(sumMetric("sum1", doublePointNoValue(k1v1k2v2, tUnknown, t2))), + adjusted: metrics(sumMetric("sum1", doublePointNoValue(k1v1k2v2, tUnknown, t2))), + }, + } + + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestMultiMetrics(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "MultiMetrics: round 1 - combined round 1 of individual metrics", + metrics: metrics( + gaugeMetric(gauge1, doublePoint(k1v1k2v2, t1, t1, 44)), + sumMetric(sum1, doublePoint(k1v1k2v2, t1, t1, 44)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{4, 2, 3, 7})), + summaryMetric(summary1, summaryPoint(k1v1k2v2, t1, t1, 10, 40, percent0, []float64{1, 5, 8})), + ), + adjusted: metrics( + gaugeMetric(gauge1, doublePoint(k1v1k2v2, t1, t1, 44)), + sumMetric(sum1, doublePoint(k1v1k2v2, t1, t1, 44)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{4, 2, 3, 7})), + summaryMetric(summary1, summaryPoint(k1v1k2v2, t1, t1, 10, 40, percent0, []float64{1, 5, 8})), + ), + }, + { + description: "MultiMetrics: round 2 - combined round 2 of individual metrics", + metrics: metrics( + gaugeMetric(gauge1, doublePoint(k1v1k2v2, t2, t2, 66)), + sumMetric(sum1, doublePoint(k1v1k2v2, t2, t2, 66)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t2, t2, bounds0, []uint64{6, 3, 4, 8})), + summaryMetric(summary1, summaryPoint(k1v1k2v2, t2, t2, 15, 70, percent0, []float64{7, 44, 9})), + ), + adjusted: metrics( + gaugeMetric(gauge1, doublePoint(k1v1k2v2, t2, t2, 66)), + sumMetric(sum1, doublePoint(k1v1k2v2, t1, t2, 66)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t2, bounds0, []uint64{6, 3, 4, 8})), + summaryMetric(summary1, summaryPoint(k1v1k2v2, t1, t2, 15, 70, percent0, []float64{7, 44, 9})), + ), + }, + { + description: "MultiMetrics: round 3 - combined round 3 of individual metrics", + metrics: metrics( + gaugeMetric(gauge1, doublePoint(k1v1k2v2, t3, t3, 55)), + sumMetric(sum1, doublePoint(k1v1k2v2, t3, t3, 55)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t3, t3, bounds0, []uint64{5, 3, 2, 7})), + summaryMetric(summary1, summaryPoint(k1v1k2v2, t3, t3, 12, 66, percent0, []float64{3, 22, 5})), + ), + adjusted: metrics( + gaugeMetric(gauge1, doublePoint(k1v1k2v2, t3, t3, 55)), + sumMetric(sum1, doublePoint(k1v1k2v2, t3, t3, 55)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t3, t3, bounds0, []uint64{5, 3, 2, 7})), + summaryMetric(summary1, summaryPoint(k1v1k2v2, t3, t3, 12, 66, percent0, []float64{3, 22, 5})), + ), + }, + { + description: "MultiMetrics: round 4 - combined round 4 of individual metrics", + metrics: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t4, t4, 72)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t4, t4, bounds0, []uint64{7, 4, 2, 12})), + summaryMetric(summary1, summaryPoint(k1v1k2v2, t4, t4, 14, 96, percent0, []float64{9, 47, 8})), + ), + adjusted: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t3, t4, 72)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t3, t4, bounds0, []uint64{7, 4, 2, 12})), + summaryMetric(summary1, summaryPoint(k1v1k2v2, t3, t4, 14, 96, percent0, []float64{9, 47, 8})), + ), + }, + } + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestNewDataPointsAdded(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "New Datapoints: round 1 - two datapoints each", + metrics: metrics( + sumMetric(sum1, + doublePoint(k1v1k2v2, t1, 
t1, 44), + doublePoint(k1v100k2v200, t1, t1, 44)), + histogramMetric(histogram1, + histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{4, 2, 3, 7}), + histogramPoint(k1v100k2v200, t1, t1, bounds0, []uint64{4, 2, 3, 7})), + summaryMetric(summary1, + summaryPoint(k1v1k2v2, t1, t1, 10, 40, percent0, []float64{1, 5, 8}), + summaryPoint(k1v100k2v200, t1, t1, 10, 40, percent0, []float64{1, 5, 8})), + ), + adjusted: metrics( + sumMetric(sum1, + doublePoint(k1v1k2v2, t1, t1, 44), + doublePoint(k1v100k2v200, t1, t1, 44)), + histogramMetric(histogram1, + histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{4, 2, 3, 7}), + histogramPoint(k1v100k2v200, t1, t1, bounds0, []uint64{4, 2, 3, 7})), + summaryMetric(summary1, + summaryPoint(k1v1k2v2, t1, t1, 10, 40, percent0, []float64{1, 5, 8}), + summaryPoint(k1v100k2v200, t1, t1, 10, 40, percent0, []float64{1, 5, 8})), + ), + }, + { + description: "New Datapoints: round 2 - new datapoints unchanged, old datapoints adjusted", + metrics: metrics( + sumMetric(sum1, + doublePoint(k1v1k2v2, t2, t2, 44), + doublePoint(k1v10k2v20, t2, t2, 44), + doublePoint(k1v100k2v200, t2, t2, 44)), + histogramMetric(histogram1, + histogramPoint(k1v1k2v2, t2, t2, bounds0, []uint64{4, 2, 3, 7}), + histogramPoint(k1v10k2v20, t2, t2, bounds0, []uint64{4, 2, 3, 7}), + histogramPoint(k1v100k2v200, t2, t2, bounds0, []uint64{4, 2, 3, 7})), + summaryMetric(summary1, + summaryPoint(k1v1k2v2, t2, t2, 10, 40, percent0, []float64{1, 5, 8}), + summaryPoint(k1v10k2v20, t2, t2, 10, 40, percent0, []float64{1, 5, 8}), + summaryPoint(k1v100k2v200, t2, t2, 10, 40, percent0, []float64{1, 5, 8})), + ), + adjusted: metrics( + sumMetric(sum1, + doublePoint(k1v1k2v2, t1, t2, 44), + doublePoint(k1v10k2v20, t2, t2, 44), + doublePoint(k1v100k2v200, t1, t2, 44)), + histogramMetric(histogram1, + histogramPoint(k1v1k2v2, t1, t2, bounds0, []uint64{4, 2, 3, 7}), + histogramPoint(k1v10k2v20, t2, t2, bounds0, []uint64{4, 2, 3, 7}), + histogramPoint(k1v100k2v200, t1, t2, bounds0, []uint64{4, 2, 3, 7})), + summaryMetric(summary1, + summaryPoint(k1v1k2v2, t1, t2, 10, 40, percent0, []float64{1, 5, 8}), + summaryPoint(k1v10k2v20, t2, t2, 10, 40, percent0, []float64{1, 5, 8}), + summaryPoint(k1v100k2v200, t1, t2, 10, 40, percent0, []float64{1, 5, 8})), + ), + }, + } + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestMultiTimeseries(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "MultiTimeseries: round 1 - initial first instance, start time is established", + metrics: metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t1, t1, 44))), + adjusted: metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t1, t1, 44))), + }, + { + description: "MultiTimeseries: round 2 - first instance adjusted based on round 1, initial second instance", + metrics: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t2, t2, 66)), + sumMetric(sum1, doublePoint(k1v10k2v20, t2, t2, 20.0)), + ), + adjusted: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t1, t2, 66)), + sumMetric(sum1, doublePoint(k1v10k2v20, t2, t2, 20.0)), + ), + }, + { + description: "MultiTimeseries: round 3 - first instance adjusted based on round 1, second based on round 2", + metrics: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t3, t3, 88.0)), + sumMetric(sum1, doublePoint(k1v10k2v20, t3, t3, 49.0)), + ), + adjusted: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t1, t3, 88.0)), + sumMetric(sum1, doublePoint(k1v10k2v20, t2, t3, 49.0)), + ), + }, + { + description: "MultiTimeseries: round 4 - first 
instance reset, second instance adjusted based on round 2, initial third instance", + metrics: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t4, t4, 87.0)), + sumMetric(sum1, doublePoint(k1v10k2v20, t4, t4, 57.0)), + sumMetric(sum1, doublePoint(k1v100k2v200, t4, t4, 10.0)), + ), + adjusted: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t4, t4, 87.0)), + sumMetric(sum1, doublePoint(k1v10k2v20, t2, t4, 57.0)), + sumMetric(sum1, doublePoint(k1v100k2v200, t4, t4, 10.0)), + ), + }, + { + description: "MultiTimeseries: round 5 - first instance adjusted based on round 4, second on round 2, third on round 4", + metrics: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t5, t5, 90.0)), + sumMetric(sum1, doublePoint(k1v10k2v20, t5, t5, 65.0)), + sumMetric(sum1, doublePoint(k1v100k2v200, t5, t5, 22.0)), + ), + adjusted: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t4, t5, 90.0)), + sumMetric(sum1, doublePoint(k1v10k2v20, t2, t5, 65.0)), + sumMetric(sum1, doublePoint(k1v100k2v200, t4, t5, 22.0)), + ), + }, + } + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestEmptyLabels(t *testing.T) { + script := []*metricsAdjusterTest{ + { + description: "EmptyLabels: round 1 - initial instance, implicitly empty labels, start time is established", + metrics: metrics(sumMetric(sum1, doublePoint(emptyLabels, t1, t1, 44))), + adjusted: metrics(sumMetric(sum1, doublePoint(emptyLabels, t1, t1, 44))), + }, + { + description: "EmptyLabels: round 2 - instance adjusted based on round 1", + metrics: metrics(sumMetric(sum1, doublePoint(emptyLabels, t2, t2, 66))), + adjusted: metrics(sumMetric(sum1, doublePoint(emptyLabels, t1, t2, 66))), + }, + { + description: "EmptyLabels: round 3 - one explicitly empty label, instance adjusted based on round 1", + metrics: metrics(sumMetric(sum1, doublePoint(k1vEmpty, t3, t3, 77))), + adjusted: metrics(sumMetric(sum1, doublePoint(k1vEmpty, t1, t3, 77))), + }, + { + description: "EmptyLabels: round 4 - three explicitly empty labels, instance adjusted based on round 1", + metrics: metrics(sumMetric(sum1, doublePoint(k1vEmptyk2vEmptyk3vEmpty, t3, t3, 88))), + adjusted: metrics(sumMetric(sum1, doublePoint(k1vEmptyk2vEmptyk3vEmpty, t1, t3, 88))), + }, + } + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) +} + +func TestTsGC(t *testing.T) { + script1 := []*metricsAdjusterTest{ + { + description: "TsGC: round 1 - initial instances, start time is established", + metrics: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t1, t1, 44)), + sumMetric(sum1, doublePoint(k1v10k2v20, t1, t1, 20)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{4, 2, 3, 7})), + histogramMetric(histogram1, histogramPoint(k1v10k2v20, t1, t1, bounds0, []uint64{40, 20, 30, 70})), + ), + adjusted: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t1, t1, 44)), + sumMetric(sum1, doublePoint(k1v10k2v20, t1, t1, 20)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{4, 2, 3, 7})), + histogramMetric(histogram1, histogramPoint(k1v10k2v20, t1, t1, bounds0, []uint64{40, 20, 30, 70})), + ), + }, + } + + script2 := []*metricsAdjusterTest{ + { + description: "TsGC: round 2 - metrics first timeseries adjusted based on round 1, second timeseries not updated", + metrics: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t2, t2, 88)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t2, t2, bounds0, []uint64{8, 7, 9, 14})), + ), + adjusted: metrics( + sumMetric(sum1,
doublePoint(k1v1k2v2, t1, t2, 88)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t2, bounds0, []uint64{8, 7, 9, 14})), + ), + }, + } + + script3 := []*metricsAdjusterTest{ + { + description: "TsGC: round 3 - metrics first timeseries adjusted based on round 1, second timeseries empty due to timeseries gc()", + metrics: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t3, t3, 99)), + sumMetric(sum1, doublePoint(k1v10k2v20, t3, t3, 80)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t3, t3, bounds0, []uint64{9, 8, 10, 15})), + histogramMetric(histogram1, histogramPoint(k1v10k2v20, t3, t3, bounds0, []uint64{55, 66, 33, 77})), + ), + adjusted: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t1, t3, 99)), + sumMetric(sum1, doublePoint(k1v10k2v20, t3, t3, 80)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t3, bounds0, []uint64{9, 8, 10, 15})), + histogramMetric(histogram1, histogramPoint(k1v10k2v20, t3, t3, bounds0, []uint64{55, 66, 33, 77})), + ), + }, + } + + ma := NewInitialPointAdjuster(zap.NewNop(), time.Minute, true) + + // run round 1 + runScript(t, ma, "job", "0", script1) + // gc the tsmap, unmarking all entries + ma.(*initialPointAdjuster).jobsMap.get("job", "0").gc() + // run round 2 - update metrics first timeseries only + runScript(t, ma, "job", "0", script2) + // gc the tsmap, collecting unmarked entries + ma.(*initialPointAdjuster).jobsMap.get("job", "0").gc() + // run round 3 - verify that metrics second timeseries have been gc'd + runScript(t, ma, "job", "0", script3) +} + +func TestJobGC(t *testing.T) { + job1Script1 := []*metricsAdjusterTest{ + { + description: "JobGC: job 1, round 1 - initial instances, start time is established", + metrics: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t1, t1, 44)), + sumMetric(sum1, doublePoint(k1v10k2v20, t1, t1, 20)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{4, 2, 3, 7})), + histogramMetric(histogram1, histogramPoint(k1v10k2v20, t1, t1, bounds0, []uint64{40, 20, 30, 70})), + ), + adjusted: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t1, t1, 44)), + sumMetric(sum1, doublePoint(k1v10k2v20, t1, t1, 20)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t1, t1, bounds0, []uint64{4, 2, 3, 7})), + histogramMetric(histogram1, histogramPoint(k1v10k2v20, t1, t1, bounds0, []uint64{40, 20, 30, 70})), + ), + }, + } + + job2Script1 := []*metricsAdjusterTest{ + { + description: "JobGC: job2, round 1 - no metrics adjusted, just trigger gc", + metrics: metrics(), + adjusted: metrics(), + }, + } + + job1Script2 := []*metricsAdjusterTest{ + { + description: "JobGC: job 1, round 2 - metrics timeseries empty due to job-level gc", + metrics: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t4, t4, 99)), + sumMetric(sum1, doublePoint(k1v10k2v20, t4, t4, 80)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t4, t4, bounds0, []uint64{9, 8, 10, 15})), + histogramMetric(histogram1, histogramPoint(k1v10k2v20, t4, t4, bounds0, []uint64{55, 66, 33, 77})), + ), + adjusted: metrics( + sumMetric(sum1, doublePoint(k1v1k2v2, t4, t4, 99)), + sumMetric(sum1, doublePoint(k1v10k2v20, t4, t4, 80)), + histogramMetric(histogram1, histogramPoint(k1v1k2v2, t4, t4, bounds0, []uint64{9, 8, 10, 15})), + histogramMetric(histogram1, histogramPoint(k1v10k2v20, t4, t4, bounds0, []uint64{55, 66, 33, 77})), + ), + }, + } + + gcInterval := 10 * time.Millisecond + ma := NewInitialPointAdjuster(zap.NewNop(), gcInterval, true) + + // run job 1, round 1 - all entries marked + runScript(t, ma, "job1",
"0", job1Script1) + // sleep longer than gcInterval to enable job gc in the next run + time.Sleep(2 * gcInterval) + // run job 2, round1 - trigger job gc, unmarking all entries + runScript(t, ma, "job1", "1", job2Script1) + // sleep longer than gcInterval to enable job gc in the next run + time.Sleep(2 * gcInterval) + // re-run job 2, round1 - trigger job gc, removing unmarked entries + runScript(t, ma, "job1", "1", job2Script1) + // ensure that at least one jobsMap.gc() completed + ma.(*initialPointAdjuster).jobsMap.gc() + // run job 1, round 2 - verify that all job 1 timeseries have been gc'd + runScript(t, ma, "job1", "0", job1Script2) +} + +type metricsAdjusterTest struct { + description string + metrics pmetric.Metrics + adjusted pmetric.Metrics +} + +func runScript(t *testing.T, ma MetricsAdjuster, job, instance string, tests []*metricsAdjusterTest) { + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + adjusted := pmetric.NewMetrics() + test.metrics.CopyTo(adjusted) + // Add the instance/job to the input metrics. + adjusted.ResourceMetrics().At(0).Resource().Attributes().PutStr(semconv.AttributeServiceInstanceID, instance) + adjusted.ResourceMetrics().At(0).Resource().Attributes().PutStr(semconv.AttributeServiceName, job) + require.NoError(t, ma.AdjustMetrics(adjusted)) + + // Add the instance/job to the expected metrics as well. + test.adjusted.ResourceMetrics().At(0).Resource().Attributes().PutStr(semconv.AttributeServiceInstanceID, instance) + test.adjusted.ResourceMetrics().At(0).Resource().Attributes().PutStr(semconv.AttributeServiceName, job) + assert.EqualValues(t, test.adjusted, adjusted) + }) + } +} + +func BenchmarkGetAttributesSignature(b *testing.B) { + attrs := pcommon.NewMap() + attrs.PutStr("key1", "some-random-test-value-1") + attrs.PutStr("key2", "some-random-test-value-2") + attrs.PutStr("key6", "some-random-test-value-6") + attrs.PutStr("key3", "some-random-test-value-3") + attrs.PutStr("key4", "some-random-test-value-4") + attrs.PutStr("key5", "some-random-test-value-5") + attrs.PutStr("key7", "some-random-test-value-7") + attrs.PutStr("key8", "some-random-test-value-8") + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + getAttributesSignature(attrs) + } +} diff --git a/pkg/promotel/internal/prometheusreceiver/internal/metricsutil_test.go b/pkg/promotel/internal/prometheusreceiver/internal/metricsutil_test.go new file mode 100644 index 0000000000..c9e0cb860a --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/internal/metricsutil_test.go @@ -0,0 +1,272 @@ +package internal + +import ( + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +type kv struct { + Key, Value string +} + +func metrics(metrics ...pmetric.Metric) pmetric.Metrics { + md := pmetric.NewMetrics() + ms := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + for _, metric := range metrics { + destMetric := ms.AppendEmpty() + metric.CopyTo(destMetric) + } + + return md +} + +func histogramPointRaw(attributes []*kv, startTimestamp, timestamp pcommon.Timestamp) pmetric.HistogramDataPoint { + hdp := pmetric.NewHistogramDataPoint() + hdp.SetStartTimestamp(startTimestamp) + hdp.SetTimestamp(timestamp) + + attrs := hdp.Attributes() + for _, kv := range attributes { + attrs.PutStr(kv.Key, kv.Value) + } + + return hdp +} + +func histogramPoint(attributes []*kv, startTimestamp, timestamp pcommon.Timestamp, bounds []float64, counts []uint64) pmetric.HistogramDataPoint { + hdp := 
histogramPointRaw(attributes, startTimestamp, timestamp) + hdp.ExplicitBounds().FromRaw(bounds) + hdp.BucketCounts().FromRaw(counts) + + // Derive Count and Sum from the bucket counts, approximating Sum with each bucket's lower bound. + var sum float64 + var count uint64 + for i, bcount := range counts { + count += bcount + if i > 0 { + sum += float64(bcount) * bounds[i-1] + } + } + hdp.SetCount(count) + hdp.SetSum(sum) + + return hdp +} + +func histogramPointNoValue(attributes []*kv, startTimestamp, timestamp pcommon.Timestamp) pmetric.HistogramDataPoint { + hdp := histogramPointRaw(attributes, startTimestamp, timestamp) + hdp.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + + return hdp +} + +func histogramMetric(name string, points ...pmetric.HistogramDataPoint) pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName(name) + metric.Metadata().PutStr("prometheus.type", "histogram") + histogram := metric.SetEmptyHistogram() + histogram.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + + destPointL := histogram.DataPoints() + // By default the AggregationTemporality is Cumulative until the caller changes it. + for _, point := range points { + destPoint := destPointL.AppendEmpty() + point.CopyTo(destPoint) + } + + return metric +} + +func exponentialHistogramMetric(name string, points ...pmetric.ExponentialHistogramDataPoint) pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName(name) + metric.Metadata().PutStr("prometheus.type", "histogram") + histogram := metric.SetEmptyExponentialHistogram() + histogram.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + + destPointL := histogram.DataPoints() + // By default the AggregationTemporality is Cumulative until the caller changes it. + for _, point := range points { + destPoint := destPointL.AppendEmpty() + point.CopyTo(destPoint) + } + + return metric +} + +func exponentialHistogramPointRaw(attributes []*kv, startTimestamp, timestamp pcommon.Timestamp) pmetric.ExponentialHistogramDataPoint { + hdp := pmetric.NewExponentialHistogramDataPoint() + hdp.SetStartTimestamp(startTimestamp) + hdp.SetTimestamp(timestamp) + + attrs := hdp.Attributes() + for _, kv := range attributes { + attrs.PutStr(kv.Key, kv.Value) + } + + return hdp +} + +func exponentialHistogramPoint(attributes []*kv, startTimestamp, timestamp pcommon.Timestamp, scale int32, zeroCount uint64, negativeOffset int32, negativeBuckets []uint64, positiveOffset int32, positiveBuckets []uint64) pmetric.ExponentialHistogramDataPoint { + hdp := exponentialHistogramPointRaw(attributes, startTimestamp, timestamp) + hdp.SetScale(scale) + hdp.SetZeroCount(zeroCount) + hdp.Negative().SetOffset(negativeOffset) + hdp.Negative().BucketCounts().FromRaw(negativeBuckets) + hdp.Positive().SetOffset(positiveOffset) + hdp.Positive().BucketCounts().FromRaw(positiveBuckets) + + count := uint64(0) + sum := float64(0) + for i, bCount := range positiveBuckets { + count += bCount + sum += float64(bCount) * float64(i) + } + for i, bCount := range negativeBuckets { + count += bCount + sum -= float64(bCount) * float64(i) + } + hdp.SetCount(count) + hdp.SetSum(sum) + return hdp +} + +func exponentialHistogramPointNoValue(attributes []*kv, startTimestamp, timestamp pcommon.Timestamp) pmetric.ExponentialHistogramDataPoint { + hdp := exponentialHistogramPointRaw(attributes, startTimestamp, timestamp) + hdp.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + + return hdp +} + +// exponentialHistogramPointSimplified lets you define an exponential +// histogram with just a few parameters.
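+// As a quick worked example of the generation rule described below
+// (illustrative): offset=-2 and bucketCount=4 produce positive and negative
+// buckets [1, 2, 3, 4] each, for a total Count of 20 and a Sum of 200.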
+// Scale and ZeroCount are set to the provided values. +// Positive and negative buckets are generated using the offset and bucketCount +// parameters by adding buckets from offset in both positive and negative +// directions. Bucket counts start from 1 and increase by 1 for each bucket. +// Sum and Count will be proportional to the bucket count. +func exponentialHistogramPointSimplified(attributes []*kv, startTimestamp, timestamp pcommon.Timestamp, scale int32, zeroCount uint64, offset int32, bucketCount int) pmetric.ExponentialHistogramDataPoint { + hdp := exponentialHistogramPointRaw(attributes, startTimestamp, timestamp) + hdp.SetScale(scale) + hdp.SetZeroCount(zeroCount) + + positive := hdp.Positive() + positive.SetOffset(offset) + positive.BucketCounts().EnsureCapacity(bucketCount) + negative := hdp.Negative() + negative.SetOffset(offset) + negative.BucketCounts().EnsureCapacity(bucketCount) + + var sum float64 + var count uint64 + for i := 0; i < bucketCount; i++ { + positive.BucketCounts().Append(uint64(i + 1)) // nolint + negative.BucketCounts().Append(uint64(i + 1)) // nolint + count += uint64(i+1) + uint64(i+1) // nolint + sum += float64(i+1)*10 + float64(i+1)*10.0 + } + hdp.SetCount(count) + hdp.SetSum(sum) + + return hdp +} + +func doublePointRaw(attributes []*kv, startTimestamp, timestamp pcommon.Timestamp) pmetric.NumberDataPoint { + ndp := pmetric.NewNumberDataPoint() + ndp.SetStartTimestamp(startTimestamp) + ndp.SetTimestamp(timestamp) + + for _, kv := range attributes { + ndp.Attributes().PutStr(kv.Key, kv.Value) + } + + return ndp +} + +func doublePoint(attributes []*kv, startTimestamp, timestamp pcommon.Timestamp, value float64) pmetric.NumberDataPoint { + ndp := doublePointRaw(attributes, startTimestamp, timestamp) + ndp.SetDoubleValue(value) + return ndp +} + +func doublePointNoValue(attributes []*kv, startTimestamp, timestamp pcommon.Timestamp) pmetric.NumberDataPoint { + ndp := doublePointRaw(attributes, startTimestamp, timestamp) + ndp.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + return ndp +} + +func gaugeMetric(name string, points ...pmetric.NumberDataPoint) pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName(name) + metric.Metadata().PutStr("prometheus.type", "gauge") + destPointL := metric.SetEmptyGauge().DataPoints() + for _, point := range points { + destPoint := destPointL.AppendEmpty() + point.CopyTo(destPoint) + } + + return metric +} + +func sumMetric(name string, points ...pmetric.NumberDataPoint) pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName(name) + metric.Metadata().PutStr("prometheus.type", "counter") + sum := metric.SetEmptySum() + sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + sum.SetIsMonotonic(true) + + destPointL := sum.DataPoints() + for _, point := range points { + destPoint := destPointL.AppendEmpty() + point.CopyTo(destPoint) + } + + return metric +} + +func summaryPointRaw(attributes []*kv, startTimestamp, timestamp pcommon.Timestamp) pmetric.SummaryDataPoint { + sdp := pmetric.NewSummaryDataPoint() + sdp.SetStartTimestamp(startTimestamp) + sdp.SetTimestamp(timestamp) + + for _, kv := range attributes { + sdp.Attributes().PutStr(kv.Key, kv.Value) + } + + return sdp +} + +func summaryPoint(attributes []*kv, startTimestamp, timestamp pcommon.Timestamp, count uint64, sum float64, quantiles, values []float64) pmetric.SummaryDataPoint { + sdp := summaryPointRaw(attributes, startTimestamp, timestamp) + sdp.SetCount(count) + sdp.SetSum(sum) + + qvL := 
sdp.QuantileValues() + for i := 0; i < len(quantiles); i++ { + qvi := qvL.AppendEmpty() + qvi.SetQuantile(quantiles[i]) + qvi.SetValue(values[i]) + } + + return sdp +} + +func summaryPointNoValue(attributes []*kv, startTimestamp, timestamp pcommon.Timestamp) pmetric.SummaryDataPoint { + sdp := summaryPointRaw(attributes, startTimestamp, timestamp) + sdp.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + + return sdp +} + +func summaryMetric(name string, points ...pmetric.SummaryDataPoint) pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName(name) + metric.Metadata().PutStr("prometheus.type", "summary") + destPointL := metric.SetEmptySummary().DataPoints() + for _, point := range points { + destPoint := destPointL.AppendEmpty() + point.CopyTo(destPoint) + } + + return metric +} diff --git a/pkg/promotel/internal/prometheusreceiver/internal/prom_to_otlp.go b/pkg/promotel/internal/prometheusreceiver/internal/prom_to_otlp.go new file mode 100644 index 0000000000..8a051f85b2 --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/internal/prom_to_otlp.go @@ -0,0 +1,100 @@ +package internal + +import ( + "net" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + "go.opentelemetry.io/collector/pdata/pcommon" + conventions "go.opentelemetry.io/collector/semconv/v1.25.0" + oldconventions "go.opentelemetry.io/collector/semconv/v1.6.1" +) + +var removeOldSemconvFeatureGateEnabled = true + +// isDiscernibleHost checks if a host can be used as a value for the 'host.name' key. +// localhost-like hosts and unspecified (0.0.0.0) hosts are not discernible. +func isDiscernibleHost(host string) bool { + ip := net.ParseIP(host) + if ip != nil { + // An IP is discernible if + // - it's not local (e.g. belongs to 127.0.0.0/8 or ::1/128) and + // - it's not unspecified (e.g. the 0.0.0.0 address). + return !ip.IsLoopback() && !ip.IsUnspecified() + } + + if host == "localhost" { + return false + } + + // not an IP, not 'localhost', assume it is discernible. + return true +} + +// CreateResource creates the resource data added to OTLP payloads. +func CreateResource(job, instance string, serviceDiscoveryLabels labels.Labels) pcommon.Resource { + host, port, err := net.SplitHostPort(instance) + if err != nil { + host = instance + } + resource := pcommon.NewResource() + attrs := resource.Attributes() + attrs.PutStr(conventions.AttributeServiceName, job) + if isDiscernibleHost(host) { + if !removeOldSemconvFeatureGateEnabled { + attrs.PutStr(oldconventions.AttributeNetHostName, host) + } + attrs.PutStr(conventions.AttributeServerAddress, host) + } + attrs.PutStr(conventions.AttributeServiceInstanceID, instance) + if !removeOldSemconvFeatureGateEnabled { + attrs.PutStr(conventions.AttributeNetHostPort, port) + attrs.PutStr(conventions.AttributeHTTPScheme, serviceDiscoveryLabels.Get(model.SchemeLabel)) + } + attrs.PutStr(conventions.AttributeServerPort, port) + attrs.PutStr(conventions.AttributeURLScheme, serviceDiscoveryLabels.Get(model.SchemeLabel)) + + addKubernetesResource(attrs, serviceDiscoveryLabels) + + return resource +} + +// kubernetesDiscoveryToResourceAttributes maps from metadata labels discovered +// through the kubernetes implementation of service discovery to opentelemetry +// resource attribute keys. 
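+// For example (illustrative values): a target discovered with the label
+// __meta_kubernetes_pod_name="my-pod-abc12" is given the resource attribute
+// k8s.pod.name="my-pod-abc12" by addKubernetesResource below.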
+var kubernetesDiscoveryToResourceAttributes = map[string]string{ + "__meta_kubernetes_pod_name": conventions.AttributeK8SPodName, + "__meta_kubernetes_pod_uid": conventions.AttributeK8SPodUID, + "__meta_kubernetes_pod_container_name": conventions.AttributeK8SContainerName, + "__meta_kubernetes_namespace": conventions.AttributeK8SNamespaceName, + // Only one of the node name service discovery labels will be present + "__meta_kubernetes_pod_node_name": conventions.AttributeK8SNodeName, + "__meta_kubernetes_node_name": conventions.AttributeK8SNodeName, + "__meta_kubernetes_endpoint_node_name": conventions.AttributeK8SNodeName, +} + +// addKubernetesResource adds resource information detected by prometheus' +// kubernetes service discovery. +func addKubernetesResource(attrs pcommon.Map, serviceDiscoveryLabels labels.Labels) { + for sdKey, attributeKey := range kubernetesDiscoveryToResourceAttributes { + if attr := serviceDiscoveryLabels.Get(sdKey); attr != "" { + attrs.PutStr(attributeKey, attr) + } + } + controllerName := serviceDiscoveryLabels.Get("__meta_kubernetes_pod_controller_name") + controllerKind := serviceDiscoveryLabels.Get("__meta_kubernetes_pod_controller_kind") + if controllerKind != "" && controllerName != "" { + switch controllerKind { + case "ReplicaSet": + attrs.PutStr(conventions.AttributeK8SReplicaSetName, controllerName) + case "DaemonSet": + attrs.PutStr(conventions.AttributeK8SDaemonSetName, controllerName) + case "StatefulSet": + attrs.PutStr(conventions.AttributeK8SStatefulSetName, controllerName) + case "Job": + attrs.PutStr(conventions.AttributeK8SJobName, controllerName) + case "CronJob": + attrs.PutStr(conventions.AttributeK8SCronJobName, controllerName) + } + } +} diff --git a/pkg/promotel/internal/prometheusreceiver/internal/prom_to_otlp_test.go b/pkg/promotel/internal/prometheusreceiver/internal/prom_to_otlp_test.go new file mode 100644 index 0000000000..08b6c2809d --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/internal/prom_to_otlp_test.go @@ -0,0 +1,368 @@ +package internal + +import ( + "testing" + + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pcommon" + conventions "go.opentelemetry.io/collector/semconv/v1.27.0" +) + +type jobInstanceDefinition struct { + job, instance, host, scheme, port string +} + +type k8sResourceDefinition struct { + podName, podUID, container, node, rs, ds, ss, job, cronjob, ns string +} + +func makeK8sResource(jobInstance *jobInstanceDefinition, def *k8sResourceDefinition) pcommon.Resource { + resource := makeResourceWithJobInstanceScheme(jobInstance, true) + attrs := resource.Attributes() + if def.podName != "" { + attrs.PutStr(conventions.AttributeK8SPodName, def.podName) + } + if def.podUID != "" { + attrs.PutStr(conventions.AttributeK8SPodUID, def.podUID) + } + if def.container != "" { + attrs.PutStr(conventions.AttributeK8SContainerName, def.container) + } + if def.node != "" { + attrs.PutStr(conventions.AttributeK8SNodeName, def.node) + } + if def.rs != "" { + attrs.PutStr(conventions.AttributeK8SReplicaSetName, def.rs) + } + if def.ds != "" { + attrs.PutStr(conventions.AttributeK8SDaemonSetName, def.ds) + } + if def.ss != "" { + attrs.PutStr(conventions.AttributeK8SStatefulSetName, def.ss) + } + if def.job != "" { + attrs.PutStr(conventions.AttributeK8SJobName, def.job) + } + if def.cronjob != "" { + attrs.PutStr(conventions.AttributeK8SCronJobName, def.cronjob) + } + if def.ns != "" { + 
attrs.PutStr(conventions.AttributeK8SNamespaceName, def.ns) + } + return resource +} + +func makeResourceWithJobInstanceScheme(def *jobInstanceDefinition, hasHost bool) pcommon.Resource { + resource := pcommon.NewResource() + attrs := resource.Attributes() + // Using hardcoded values to assert on outward expectations so that + // when variables change, these tests will fail and we'll have reports. + attrs.PutStr("service.name", def.job) + if hasHost { + attrs.PutStr("server.address", def.host) + } + attrs.PutStr("service.instance.id", def.instance) + attrs.PutStr("server.port", def.port) + attrs.PutStr("url.scheme", def.scheme) + return resource +} + +func makeResourceWithJobInstanceSchemeDuplicate(def *jobInstanceDefinition, hasHost bool) pcommon.Resource { + resource := pcommon.NewResource() + attrs := resource.Attributes() + // Using hardcoded values to assert on outward expectations so that + // when variables change, these tests will fail and we'll have reports. + attrs.PutStr("service.name", def.job) + if hasHost { + attrs.PutStr("net.host.name", def.host) + attrs.PutStr("server.address", def.host) + } + attrs.PutStr("service.instance.id", def.instance) + attrs.PutStr("net.host.port", def.port) + attrs.PutStr("http.scheme", def.scheme) + attrs.PutStr("server.port", def.port) + attrs.PutStr("url.scheme", def.scheme) + return resource +} + +func TestCreateNodeAndResourcePromToOTLP(t *testing.T) { + tests := []struct { + name, job string + instance string + sdLabels labels.Labels + removeOldSemconvFeatureGate bool + want pcommon.Resource + }{ + { + name: "all attributes proper", + job: "job", instance: "hostname:8888", sdLabels: labels.New(labels.Label{Name: "__scheme__", Value: "http"}), + removeOldSemconvFeatureGate: true, + want: makeResourceWithJobInstanceScheme(&jobInstanceDefinition{ + "job", "hostname:8888", "hostname", "http", "8888", + }, true), + }, + { + name: "missing port", + job: "job", instance: "myinstance", sdLabels: labels.New(labels.Label{Name: "__scheme__", Value: "https"}), + removeOldSemconvFeatureGate: true, + want: makeResourceWithJobInstanceScheme(&jobInstanceDefinition{ + "job", "myinstance", "myinstance", "https", "", + }, true), + }, + { + name: "blank scheme", + job: "job", instance: "myinstance:443", sdLabels: labels.New(labels.Label{Name: "__scheme__", Value: ""}), + removeOldSemconvFeatureGate: true, + want: makeResourceWithJobInstanceScheme(&jobInstanceDefinition{ + "job", "myinstance:443", "myinstance", "", "443", + }, true), + }, + { + name: "blank instance, blank scheme", + job: "job", instance: "", sdLabels: labels.New(labels.Label{Name: "__scheme__", Value: ""}), + removeOldSemconvFeatureGate: true, + want: makeResourceWithJobInstanceScheme(&jobInstanceDefinition{ + "job", "", "", "", "", + }, true), + }, + { + name: "blank instance, non-blank scheme", + job: "job", instance: "", sdLabels: labels.New(labels.Label{Name: "__scheme__", Value: "http"}), + removeOldSemconvFeatureGate: true, + want: makeResourceWithJobInstanceScheme(&jobInstanceDefinition{ + "job", "", "", "http", "", + }, true), + }, + { + name: "0.0.0.0 address", + job: "job", instance: "0.0.0.0:8888", sdLabels: labels.New(labels.Label{Name: "__scheme__", Value: "http"}), + removeOldSemconvFeatureGate: true, + want: makeResourceWithJobInstanceScheme(&jobInstanceDefinition{ + "job", "0.0.0.0:8888", "", "http", "8888", + }, false), + }, + { + name: "localhost", + job: "job", instance: "localhost:8888", sdLabels: labels.New(labels.Label{Name: "__scheme__", Value: "http"}), + 
removeOldSemconvFeatureGate: true, + want: makeResourceWithJobInstanceScheme(&jobInstanceDefinition{ + "job", "localhost:8888", "", "http", "8888", + }, false), + }, + { + name: "all attributes proper with duplicates", + job: "job", instance: "hostname:8888", sdLabels: labels.New(labels.Label{Name: "__scheme__", Value: "http"}), + want: makeResourceWithJobInstanceSchemeDuplicate(&jobInstanceDefinition{ + "job", "hostname:8888", "hostname", "http", "8888", + }, true), + }, + { + name: "missing port with duplicates", + job: "job", instance: "myinstance", sdLabels: labels.New(labels.Label{Name: "__scheme__", Value: "https"}), + want: makeResourceWithJobInstanceSchemeDuplicate(&jobInstanceDefinition{ + "job", "myinstance", "myinstance", "https", "", + }, true), + }, + { + name: "blank scheme with duplicates", + job: "job", instance: "myinstance:443", sdLabels: labels.New(labels.Label{Name: "__scheme__", Value: ""}), + want: makeResourceWithJobInstanceSchemeDuplicate(&jobInstanceDefinition{ + "job", "myinstance:443", "myinstance", "", "443", + }, true), + }, + { + name: "blank instance, blank scheme with duplicates", + job: "job", instance: "", sdLabels: labels.New(labels.Label{Name: "__scheme__", Value: ""}), + want: makeResourceWithJobInstanceSchemeDuplicate(&jobInstanceDefinition{ + "job", "", "", "", "", + }, true), + }, + { + name: "blank instance, non-blank scheme with duplicates", + job: "job", instance: "", sdLabels: labels.New(labels.Label{Name: "__scheme__", Value: "http"}), + want: makeResourceWithJobInstanceSchemeDuplicate(&jobInstanceDefinition{ + "job", "", "", "http", "", + }, true), + }, + { + name: "0.0.0.0 address with duplicates", + job: "job", instance: "0.0.0.0:8888", sdLabels: labels.New(labels.Label{Name: "__scheme__", Value: "http"}), + want: makeResourceWithJobInstanceSchemeDuplicate(&jobInstanceDefinition{ + "job", "0.0.0.0:8888", "", "http", "8888", + }, false), + }, + { + name: "localhost with duplicates", + job: "job", instance: "localhost:8888", sdLabels: labels.New(labels.Label{Name: "__scheme__", Value: "http"}), + want: makeResourceWithJobInstanceSchemeDuplicate(&jobInstanceDefinition{ + "job", "localhost:8888", "", "http", "8888", + }, false), + }, + { + name: "kubernetes daemonset pod", + job: "job", instance: "hostname:8888", sdLabels: labels.New( + labels.Label{Name: "__scheme__", Value: "http"}, + labels.Label{Name: "__meta_kubernetes_pod_name", Value: "my-pod-23491"}, + labels.Label{Name: "__meta_kubernetes_pod_uid", Value: "84279wretgu89dg489q2"}, + labels.Label{Name: "__meta_kubernetes_pod_container_name", Value: "my-container"}, + labels.Label{Name: "__meta_kubernetes_pod_node_name", Value: "k8s-node-123"}, + labels.Label{Name: "__meta_kubernetes_pod_controller_name", Value: "my-pod"}, + labels.Label{Name: "__meta_kubernetes_pod_controller_kind", Value: "DaemonSet"}, + labels.Label{Name: "__meta_kubernetes_namespace", Value: "kube-system"}, + ), + removeOldSemconvFeatureGate: true, + want: makeK8sResource(&jobInstanceDefinition{ + "job", "hostname:8888", "hostname", "http", "8888", + }, &k8sResourceDefinition{ + podName: "my-pod-23491", + podUID: "84279wretgu89dg489q2", + container: "my-container", + node: "k8s-node-123", + ds: "my-pod", + ns: "kube-system", + }), + }, + { + name: "kubernetes replicaset pod", + job: "job", instance: "hostname:8888", sdLabels: labels.New( + labels.Label{Name: "__scheme__", Value: "http"}, + labels.Label{Name: "__meta_kubernetes_pod_name", Value: "my-pod-23491"}, + labels.Label{Name: "__meta_kubernetes_pod_uid", Value: 
"84279wretgu89dg489q2"}, + labels.Label{Name: "__meta_kubernetes_pod_container_name", Value: "my-container"}, + labels.Label{Name: "__meta_kubernetes_pod_node_name", Value: "k8s-node-123"}, + labels.Label{Name: "__meta_kubernetes_pod_controller_name", Value: "my-pod"}, + labels.Label{Name: "__meta_kubernetes_pod_controller_kind", Value: "ReplicaSet"}, + labels.Label{Name: "__meta_kubernetes_namespace", Value: "kube-system"}, + ), + removeOldSemconvFeatureGate: true, + want: makeK8sResource(&jobInstanceDefinition{ + "job", "hostname:8888", "hostname", "http", "8888", + }, &k8sResourceDefinition{ + podName: "my-pod-23491", + podUID: "84279wretgu89dg489q2", + container: "my-container", + node: "k8s-node-123", + rs: "my-pod", + ns: "kube-system", + }), + }, + { + name: "kubernetes statefulset pod", + job: "job", instance: "hostname:8888", sdLabels: labels.New( + labels.Label{Name: "__scheme__", Value: "http"}, + labels.Label{Name: "__meta_kubernetes_pod_name", Value: "my-pod-23491"}, + labels.Label{Name: "__meta_kubernetes_pod_uid", Value: "84279wretgu89dg489q2"}, + labels.Label{Name: "__meta_kubernetes_pod_container_name", Value: "my-container"}, + labels.Label{Name: "__meta_kubernetes_pod_node_name", Value: "k8s-node-123"}, + labels.Label{Name: "__meta_kubernetes_pod_controller_name", Value: "my-pod"}, + labels.Label{Name: "__meta_kubernetes_pod_controller_kind", Value: "StatefulSet"}, + labels.Label{Name: "__meta_kubernetes_namespace", Value: "kube-system"}, + ), + removeOldSemconvFeatureGate: true, + want: makeK8sResource(&jobInstanceDefinition{ + "job", "hostname:8888", "hostname", "http", "8888", + }, &k8sResourceDefinition{ + podName: "my-pod-23491", + podUID: "84279wretgu89dg489q2", + container: "my-container", + node: "k8s-node-123", + ss: "my-pod", + ns: "kube-system", + }), + }, + { + name: "kubernetes job pod", + job: "job", instance: "hostname:8888", sdLabels: labels.New( + labels.Label{Name: "__scheme__", Value: "http"}, + labels.Label{Name: "__meta_kubernetes_pod_name", Value: "my-pod-23491"}, + labels.Label{Name: "__meta_kubernetes_pod_uid", Value: "84279wretgu89dg489q2"}, + labels.Label{Name: "__meta_kubernetes_pod_container_name", Value: "my-container"}, + labels.Label{Name: "__meta_kubernetes_pod_node_name", Value: "k8s-node-123"}, + labels.Label{Name: "__meta_kubernetes_pod_controller_name", Value: "my-pod"}, + labels.Label{Name: "__meta_kubernetes_pod_controller_kind", Value: "Job"}, + labels.Label{Name: "__meta_kubernetes_namespace", Value: "kube-system"}, + ), + removeOldSemconvFeatureGate: true, + want: makeK8sResource(&jobInstanceDefinition{ + "job", "hostname:8888", "hostname", "http", "8888", + }, &k8sResourceDefinition{ + podName: "my-pod-23491", + podUID: "84279wretgu89dg489q2", + container: "my-container", + node: "k8s-node-123", + job: "my-pod", + ns: "kube-system", + }), + }, + { + name: "kubernetes cronjob pod", + job: "job", instance: "hostname:8888", sdLabels: labels.New( + labels.Label{Name: "__scheme__", Value: "http"}, + labels.Label{Name: "__meta_kubernetes_pod_name", Value: "my-pod-23491"}, + labels.Label{Name: "__meta_kubernetes_pod_uid", Value: "84279wretgu89dg489q2"}, + labels.Label{Name: "__meta_kubernetes_pod_container_name", Value: "my-container"}, + labels.Label{Name: "__meta_kubernetes_pod_node_name", Value: "k8s-node-123"}, + labels.Label{Name: "__meta_kubernetes_pod_controller_name", Value: "my-pod"}, + labels.Label{Name: "__meta_kubernetes_pod_controller_kind", Value: "CronJob"}, + labels.Label{Name: "__meta_kubernetes_namespace", Value: 
"kube-system"}, + ), + removeOldSemconvFeatureGate: true, + want: makeK8sResource(&jobInstanceDefinition{ + "job", "hostname:8888", "hostname", "http", "8888", + }, &k8sResourceDefinition{ + podName: "my-pod-23491", + podUID: "84279wretgu89dg489q2", + container: "my-container", + node: "k8s-node-123", + cronjob: "my-pod", + ns: "kube-system", + }), + }, + { + name: "kubernetes node (e.g. kubelet)", + job: "job", instance: "hostname:8888", sdLabels: labels.New( + labels.Label{Name: "__scheme__", Value: "http"}, + labels.Label{Name: "__meta_kubernetes_node_name", Value: "k8s-node-123"}, + ), + removeOldSemconvFeatureGate: true, + want: makeK8sResource(&jobInstanceDefinition{ + "job", "hostname:8888", "hostname", "http", "8888", + }, &k8sResourceDefinition{ + node: "k8s-node-123", + }), + }, + { + name: "kubernetes service endpoint", + job: "job", instance: "hostname:8888", sdLabels: labels.New( + labels.Label{Name: "__scheme__", Value: "http"}, + labels.Label{Name: "__meta_kubernetes_endpoint_node_name", Value: "k8s-node-123"}, + ), + removeOldSemconvFeatureGate: true, + want: makeK8sResource(&jobInstanceDefinition{ + "job", "hostname:8888", "hostname", "http", "8888", + }, &k8sResourceDefinition{ + node: "k8s-node-123", + }), + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + SetFeatureGateForTest(t, &removeOldSemconvFeatureGateEnabled, tt.removeOldSemconvFeatureGate) + got := CreateResource(tt.job, tt.instance, tt.sdLabels) + require.Equal(t, tt.want.Attributes().AsRaw(), got.Attributes().AsRaw()) + }) + } +} + +// Force the state of feature gate for a test +// usage: defer SetFeatureGateForTest("gateName", true)() +func SetFeatureGateForTest(t testing.TB, gate *bool, enabled bool) func() { + originalValue := removeOldSemconvFeatureGateEnabled + removeOldSemconvFeatureGateEnabled = enabled + return func() { + removeOldSemconvFeatureGateEnabled = originalValue + } +} diff --git a/pkg/promotel/internal/prometheusreceiver/internal/starttimemetricadjuster.go b/pkg/promotel/internal/prometheusreceiver/internal/starttimemetricadjuster.go new file mode 100644 index 0000000000..2bce3b264b --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/internal/starttimemetricadjuster.go @@ -0,0 +1,127 @@ +package internal + +import ( + "errors" + "regexp" + + "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/zap" +) + +var ( + errNoStartTimeMetrics = errors.New("start_time metric is missing") + errNoDataPointsStartTimeMetric = errors.New("start time metric with no data points") + errUnsupportedTypeStartTimeMetric = errors.New("unsupported data type for start time metric") +) + +type startTimeMetricAdjuster struct { + startTimeMetricRegex *regexp.Regexp + logger *zap.Logger +} + +// NewStartTimeMetricAdjuster returns a new MetricsAdjuster that adjust metrics' start times based on a start time metric. 
+func NewStartTimeMetricAdjuster(logger *zap.Logger, startTimeMetricRegex *regexp.Regexp) MetricsAdjuster { + return &startTimeMetricAdjuster{ + startTimeMetricRegex: startTimeMetricRegex, + logger: logger, + } +} + +func (stma *startTimeMetricAdjuster) AdjustMetrics(metrics pmetric.Metrics) error { + startTime, err := stma.getStartTime(metrics) + if err != nil { + return err + } + + startTimeTs := timestampFromFloat64(startTime) + for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + rm := metrics.ResourceMetrics().At(i) + for j := 0; j < rm.ScopeMetrics().Len(); j++ { + ilm := rm.ScopeMetrics().At(j) + for k := 0; k < ilm.Metrics().Len(); k++ { + metric := ilm.Metrics().At(k) + switch metric.Type() { + case pmetric.MetricTypeGauge: + continue + + case pmetric.MetricTypeSum: + dataPoints := metric.Sum().DataPoints() + for l := 0; l < dataPoints.Len(); l++ { + dp := dataPoints.At(l) + dp.SetStartTimestamp(startTimeTs) + } + + case pmetric.MetricTypeSummary: + dataPoints := metric.Summary().DataPoints() + for l := 0; l < dataPoints.Len(); l++ { + dp := dataPoints.At(l) + dp.SetStartTimestamp(startTimeTs) + } + + case pmetric.MetricTypeHistogram: + dataPoints := metric.Histogram().DataPoints() + for l := 0; l < dataPoints.Len(); l++ { + dp := dataPoints.At(l) + dp.SetStartTimestamp(startTimeTs) + } + + case pmetric.MetricTypeExponentialHistogram: + dataPoints := metric.ExponentialHistogram().DataPoints() + for l := 0; l < dataPoints.Len(); l++ { + dp := dataPoints.At(l) + dp.SetStartTimestamp(startTimeTs) + } + + case pmetric.MetricTypeEmpty: + fallthrough + + default: + stma.logger.Warn("Unknown metric type", zap.String("type", metric.Type().String())) + } + } + } + } + + return nil +} + +func (stma *startTimeMetricAdjuster) getStartTime(metrics pmetric.Metrics) (float64, error) { + for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + rm := metrics.ResourceMetrics().At(i) + for j := 0; j < rm.ScopeMetrics().Len(); j++ { + ilm := rm.ScopeMetrics().At(j) + for k := 0; k < ilm.Metrics().Len(); k++ { + metric := ilm.Metrics().At(k) + if stma.matchStartTimeMetric(metric.Name()) { + switch metric.Type() { + case pmetric.MetricTypeGauge: + if metric.Gauge().DataPoints().Len() == 0 { + return 0.0, errNoDataPointsStartTimeMetric + } + return metric.Gauge().DataPoints().At(0).DoubleValue(), nil + + case pmetric.MetricTypeSum: + if metric.Sum().DataPoints().Len() == 0 { + return 0.0, errNoDataPointsStartTimeMetric + } + return metric.Sum().DataPoints().At(0).DoubleValue(), nil + + case pmetric.MetricTypeEmpty, pmetric.MetricTypeHistogram, pmetric.MetricTypeExponentialHistogram, pmetric.MetricTypeSummary: + fallthrough + default: + return 0, errUnsupportedTypeStartTimeMetric + } + } + } + } + } + return 0.0, errNoStartTimeMetrics +} + +func (stma *startTimeMetricAdjuster) matchStartTimeMetric(metricName string) bool { + if stma.startTimeMetricRegex != nil { + return stma.startTimeMetricRegex.MatchString(metricName) + } + + return metricName == startTimeMetricName +} diff --git a/pkg/promotel/internal/prometheusreceiver/internal/starttimemetricadjuster_test.go b/pkg/promotel/internal/prometheusreceiver/internal/starttimemetricadjuster_test.go new file mode 100644 index 0000000000..0d1b97592a --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/internal/starttimemetricadjuster_test.go @@ -0,0 +1,154 @@ +package internal + +import ( + "regexp" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pcommon" + 
"go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/zap" +) + +func TestStartTimeMetricMatch(t *testing.T) { + const startTime = pcommon.Timestamp(123 * 1e9) + const currentTime = pcommon.Timestamp(126 * 1e9) + const matchBuilderStartTime = 124 + + tests := []struct { + name string + inputs pmetric.Metrics + startTimeMetricRegex *regexp.Regexp + expectedStartTime pcommon.Timestamp + expectedErr error + }{ + { + name: "regexp_match_sum_metric", + inputs: metrics( + sumMetric("test_sum_metric", doublePoint(nil, startTime, currentTime, 16)), + histogramMetric("test_histogram_metric", histogramPoint(nil, startTime, currentTime, []float64{1, 2}, []uint64{2, 3, 4})), + summaryMetric("test_summary_metric", summaryPoint(nil, startTime, currentTime, 10, 100, []float64{10, 50, 90}, []float64{9, 15, 48})), + sumMetric("example_process_start_time_seconds", doublePoint(nil, startTime, currentTime, matchBuilderStartTime)), + sumMetric("process_start_time_seconds", doublePoint(nil, startTime, currentTime, matchBuilderStartTime+1)), + exponentialHistogramMetric("test_exponential_histogram_metric", exponentialHistogramPointSimplified(nil, startTime, currentTime, 3, 1, -5, 3)), + ), + startTimeMetricRegex: regexp.MustCompile("^.*_process_start_time_seconds$"), + expectedStartTime: timestampFromFloat64(matchBuilderStartTime), + }, + { + name: "match_default_sum_start_time_metric", + inputs: metrics( + sumMetric("test_sum_metric", doublePoint(nil, startTime, currentTime, 16)), + histogramMetric("test_histogram_metric", histogramPoint(nil, startTime, currentTime, []float64{1, 2}, []uint64{2, 3, 4})), + summaryMetric("test_summary_metric", summaryPoint(nil, startTime, currentTime, 10, 100, []float64{10, 50, 90}, []float64{9, 15, 48})), + sumMetric("example_process_start_time_seconds", doublePoint(nil, startTime, currentTime, matchBuilderStartTime)), + sumMetric("process_start_time_seconds", doublePoint(nil, startTime, currentTime, matchBuilderStartTime+1)), + exponentialHistogramMetric("test_exponential_histogram_metric", exponentialHistogramPointSimplified(nil, startTime, currentTime, 3, 1, -5, 3)), + ), + expectedStartTime: timestampFromFloat64(matchBuilderStartTime + 1), + }, + { + name: "regexp_match_gauge_metric", + inputs: metrics( + sumMetric("test_sum_metric", doublePoint(nil, startTime, currentTime, 16)), + histogramMetric("test_histogram_metric", histogramPoint(nil, startTime, currentTime, []float64{1, 2}, []uint64{2, 3, 4})), + summaryMetric("test_summary_metric", summaryPoint(nil, startTime, currentTime, 10, 100, []float64{10, 50, 90}, []float64{9, 15, 48})), + gaugeMetric("example_process_start_time_seconds", doublePoint(nil, startTime, currentTime, matchBuilderStartTime)), + gaugeMetric("process_start_time_seconds", doublePoint(nil, startTime, currentTime, matchBuilderStartTime+1)), + ), + startTimeMetricRegex: regexp.MustCompile("^.*_process_start_time_seconds$"), + expectedStartTime: timestampFromFloat64(matchBuilderStartTime), + }, + { + name: "match_default_gauge_start_time_metric", + inputs: metrics( + sumMetric("test_sum_metric", doublePoint(nil, startTime, currentTime, 16)), + histogramMetric("test_histogram_metric", histogramPoint(nil, startTime, currentTime, []float64{1, 2}, []uint64{2, 3, 4})), + summaryMetric("test_summary_metric", summaryPoint(nil, startTime, currentTime, 10, 100, []float64{10, 50, 90}, []float64{9, 15, 48})), + gaugeMetric("example_process_start_time_seconds", doublePoint(nil, startTime, currentTime, matchBuilderStartTime)), + 
gaugeMetric("process_start_time_seconds", doublePoint(nil, startTime, currentTime, matchBuilderStartTime+1)), + ), + expectedStartTime: timestampFromFloat64(matchBuilderStartTime + 1), + }, + { + name: "empty gauge start time metrics", + inputs: metrics( + gaugeMetric("process_start_time_seconds"), + ), + expectedErr: errNoDataPointsStartTimeMetric, + }, + { + name: "empty sum start time metrics", + inputs: metrics( + sumMetric("process_start_time_seconds"), + ), + expectedErr: errNoDataPointsStartTimeMetric, + }, + { + name: "unsupported type start time metric", + inputs: metrics( + histogramMetric("process_start_time_seconds"), + ), + expectedErr: errUnsupportedTypeStartTimeMetric, + }, + { + name: "regexp_nomatch", + inputs: metrics( + sumMetric("subprocess_start_time_seconds", doublePoint(nil, startTime, currentTime, matchBuilderStartTime)), + ), + startTimeMetricRegex: regexp.MustCompile("^.+_process_start_time_seconds$"), + expectedErr: errNoStartTimeMetrics, + }, + { + name: "nomatch_default_start_time_metric", + inputs: metrics( + gaugeMetric("subprocess_start_time_seconds", doublePoint(nil, startTime, currentTime, matchBuilderStartTime)), + ), + expectedErr: errNoStartTimeMetrics, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + stma := NewStartTimeMetricAdjuster(zap.NewNop(), tt.startTimeMetricRegex) + if tt.expectedErr != nil { + assert.ErrorIs(t, stma.AdjustMetrics(tt.inputs), tt.expectedErr) + return + } + require.NoError(t, stma.AdjustMetrics(tt.inputs)) + for i := 0; i < tt.inputs.ResourceMetrics().Len(); i++ { + rm := tt.inputs.ResourceMetrics().At(i) + for j := 0; j < rm.ScopeMetrics().Len(); j++ { + ilm := rm.ScopeMetrics().At(j) + for k := 0; k < ilm.Metrics().Len(); k++ { + metric := ilm.Metrics().At(k) + switch metric.Type() { + case pmetric.MetricTypeSum: + dps := metric.Sum().DataPoints() + for l := 0; l < dps.Len(); l++ { + assert.Equal(t, tt.expectedStartTime, dps.At(l).StartTimestamp()) + } + case pmetric.MetricTypeSummary: + dps := metric.Summary().DataPoints() + for l := 0; l < dps.Len(); l++ { + assert.Equal(t, tt.expectedStartTime, dps.At(l).StartTimestamp()) + } + case pmetric.MetricTypeHistogram: + dps := metric.Histogram().DataPoints() + for l := 0; l < dps.Len(); l++ { + assert.Equal(t, tt.expectedStartTime, dps.At(l).StartTimestamp()) + } + case pmetric.MetricTypeExponentialHistogram: + dps := metric.ExponentialHistogram().DataPoints() + for l := 0; l < dps.Len(); l++ { + assert.Equal(t, tt.expectedStartTime, dps.At(l).StartTimestamp()) + } + case pmetric.MetricTypeEmpty, pmetric.MetricTypeGauge: + } + } + } + } + }) + } +} diff --git a/pkg/promotel/internal/prometheusreceiver/internal/transaction.go b/pkg/promotel/internal/prometheusreceiver/internal/transaction.go new file mode 100644 index 0000000000..e071e24580 --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/internal/transaction.go @@ -0,0 +1,535 @@ +package internal + +import ( + "context" + "errors" + "fmt" + "math" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/storage" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + 
"go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/receiverhelper" + "go.uber.org/zap" + + "github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheus/scrape" + mdata "github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheusreceiver/internal/metadata" + "github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/translator/prometheus" +) + +type resourceKey struct { + job string + instance string +} +type transaction struct { + isNew bool + trimSuffixes bool + enableNativeHistograms bool + ctx context.Context // nolint + families map[resourceKey]map[scopeID]map[string]*metricFamily + mc scrape.MetricMetadataStore + sink consumer.Metrics + externalLabels labels.Labels + nodeResources map[resourceKey]pcommon.Resource + scopeAttributes map[resourceKey]map[scopeID]pcommon.Map + logger *zap.Logger + buildInfo component.BuildInfo + metricAdjuster MetricsAdjuster + obsrecv *receiverhelper.ObsReport + // Used as buffer to calculate series ref hash. + bufBytes []byte +} + +var emptyScopeID scopeID + +type scopeID struct { + name string + version string +} + +func NewTransaction( + ctx context.Context, + metricAdjuster MetricsAdjuster, + sink consumer.Metrics, + externalLabels labels.Labels, + settings receiver.Settings, + obsrecv *receiverhelper.ObsReport, + trimSuffixes bool, + enableNativeHistograms bool, +) *transaction { + return newTransaction( + ctx, + metricAdjuster, + sink, + externalLabels, + settings, + obsrecv, + trimSuffixes, + enableNativeHistograms, + ) +} + +func newTransaction( + ctx context.Context, + metricAdjuster MetricsAdjuster, + sink consumer.Metrics, + externalLabels labels.Labels, + settings receiver.Settings, + obsrecv *receiverhelper.ObsReport, + trimSuffixes bool, + enableNativeHistograms bool, +) *transaction { + return &transaction{ + ctx: ctx, + families: make(map[resourceKey]map[scopeID]map[string]*metricFamily), + isNew: true, + trimSuffixes: trimSuffixes, + enableNativeHistograms: enableNativeHistograms, + sink: sink, + metricAdjuster: metricAdjuster, + externalLabels: externalLabels, + logger: settings.Logger, + buildInfo: settings.BuildInfo, + obsrecv: obsrecv, + bufBytes: make([]byte, 0, 1024), + scopeAttributes: make(map[resourceKey]map[scopeID]pcommon.Map), + nodeResources: map[resourceKey]pcommon.Resource{}, + } +} + +// Append always returns 0 to disable label caching. +func (t *transaction) Append(_ storage.SeriesRef, ls labels.Labels, atMs int64, val float64) (storage.SeriesRef, error) { + select { + case <-t.ctx.Done(): + return 0, errTransactionAborted + default: + } + + if t.externalLabels.Len() != 0 { + b := labels.NewBuilder(ls) + t.externalLabels.Range(func(l labels.Label) { + b.Set(l.Name, l.Value) + }) + ls = b.Labels() + } + + rKey, err := t.initTransaction(ls) + if err != nil { + return 0, err + } + + // Any datapoint with duplicate labels MUST be rejected per: + // * https://github.com/open-telemetry/wg-prometheus/issues/44 + // * https://github.com/open-telemetry/opentelemetry-collector/issues/3407 + // as Prometheus rejects such too as of version 2.16.0, released on 2020-02-13. 
+ if dupLabel, hasDup := ls.HasDuplicateLabelNames(); hasDup { + return 0, fmt.Errorf("invalid sample: non-unique label names: %q", dupLabel) + } + + metricName := ls.Get(model.MetricNameLabel) + if metricName == "" { + return 0, errMetricNameNotFound + } + + // See https://www.prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series + // up: 1 if the instance is healthy, i.e. reachable, or 0 if the scrape failed. + // But it can also be a staleNaN, which is inserted when the target goes away. + if metricName == scrapeUpMetricName && val != 1.0 && !value.IsStaleNaN(val) { + if val == 0.0 { + t.logger.Warn("Failed to scrape Prometheus endpoint", + zap.Int64("scrape_timestamp", atMs), + zap.Stringer("target_labels", ls)) + } else { + t.logger.Warn("The 'up' metric contains invalid value", + zap.Float64("value", val), + zap.Int64("scrape_timestamp", atMs), + zap.Stringer("target_labels", ls)) + } + } + + // For the `target_info` metric we need to convert it to resource attributes. + if metricName == prometheus.TargetInfoMetricName { + t.AddTargetInfo(*rKey, ls) + return 0, nil + } + + // For the `otel_scope_info` metric we need to convert it to scope attributes. + if metricName == prometheus.ScopeInfoMetricName { + t.addScopeInfo(*rKey, ls) + return 0, nil + } + + curMF, existing := t.getOrCreateMetricFamily(*rKey, getScopeID(ls), metricName) + + if t.enableNativeHistograms && curMF.mtype == pmetric.MetricTypeExponentialHistogram { + // If a histogram has both classic and native version, the native histogram is scraped + // first. Getting a float sample for the same series means that `scrape_classic_histogram` + // is set to true in the scrape config. In this case, we should ignore the native histogram. + curMF.mtype = pmetric.MetricTypeHistogram + } + + seriesRef := t.getSeriesRef(ls, curMF.mtype) + err = curMF.addSeries(seriesRef, metricName, ls, atMs, val) + if err != nil { + // Handle special case of float sample indicating staleness of native + // histogram. This is similar to how Prometheus handles it, but we + // don't have access to the previous value so we're applying some + // heuristics to figure out if this is native histogram or not. + // The metric type will indicate histogram, but presumably there will be no + // _bucket, _count, _sum suffix or `le` label, which makes addSeries fail + // with errEmptyLeLabel. + if t.enableNativeHistograms && errors.Is(err, errEmptyLeLabel) && !existing && value.IsStaleNaN(val) && curMF.mtype == pmetric.MetricTypeHistogram { + mg := curMF.loadMetricGroupOrCreate(seriesRef, ls, atMs) + curMF.mtype = pmetric.MetricTypeExponentialHistogram + mg.mtype = pmetric.MetricTypeExponentialHistogram + _ = curMF.addExponentialHistogramSeries(seriesRef, metricName, ls, atMs, &histogram.Histogram{Sum: math.Float64frombits(value.StaleNaN)}, nil) + // ignore errors here, this is best effort. + } else { + t.logger.Warn("failed to add datapoint", zap.Error(err), zap.String("metric_name", metricName), zap.Any("labels", ls)) + } + } + + return 0, nil // never return errors, as that fails the whole scrape +} + +// getOrCreateMetricFamily returns the metric family for the given metric name and scope, +// and true if an existing family was found. 
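+// For example (illustrative): when the scraped name has no explicit metadata,
+// a series named "http_request_duration_seconds_bucket" is normalized and, if
+// the "http_request_duration_seconds" family already exists and covers it,
+// that family is reused; otherwise a new family is created under the raw name.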
+func (t *transaction) getOrCreateMetricFamily(key resourceKey, scope scopeID, mn string) (*metricFamily, bool) { + if _, ok := t.families[key]; !ok { + t.families[key] = make(map[scopeID]map[string]*metricFamily) + } + if _, ok := t.families[key][scope]; !ok { + t.families[key][scope] = make(map[string]*metricFamily) + } + + curMf, ok := t.families[key][scope][mn] + if !ok { + fn := mn + if _, ok := t.mc.GetMetadata(mn); !ok { + fn = normalizeMetricName(mn) + } + if mf, ok := t.families[key][scope][fn]; ok && mf.includesMetric(mn) { // nolint + curMf = mf + } else { + curMf = newMetricFamily(mn, t.mc, t.logger) + t.families[key][scope][curMf.name] = curMf + return curMf, false + } + } + return curMf, true +} + +func (t *transaction) AppendExemplar(_ storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { + select { + case <-t.ctx.Done(): + return 0, errTransactionAborted + default: + } + + rKey, err := t.initTransaction(l) + if err != nil { + return 0, err + } + + l = l.WithoutEmpty() + + if dupLabel, hasDup := l.HasDuplicateLabelNames(); hasDup { + return 0, fmt.Errorf("invalid sample: non-unique label names: %q", dupLabel) + } + + mn := l.Get(model.MetricNameLabel) + if mn == "" { + return 0, errMetricNameNotFound + } + + mf, _ := t.getOrCreateMetricFamily(*rKey, getScopeID(l), mn) + mf.addExemplar(t.getSeriesRef(l, mf.mtype), e) + + return 0, nil +} + +func (t *transaction) AppendHistogram(_ storage.SeriesRef, ls labels.Labels, atMs int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + if !t.enableNativeHistograms { + return 0, nil + } + + select { + case <-t.ctx.Done(): + return 0, errTransactionAborted + default: + } + + if t.externalLabels.Len() != 0 { + b := labels.NewBuilder(ls) + t.externalLabels.Range(func(l labels.Label) { + b.Set(l.Name, l.Value) + }) + ls = b.Labels() + } + + rKey, err := t.initTransaction(ls) + if err != nil { + return 0, err + } + + // Any datapoint with duplicate labels MUST be rejected per: + // * https://github.com/open-telemetry/wg-prometheus/issues/44 + // * https://github.com/open-telemetry/opentelemetry-collector/issues/3407 + // as Prometheus rejects such too as of version 2.16.0, released on 2020-02-13. + if dupLabel, hasDup := ls.HasDuplicateLabelNames(); hasDup { + return 0, fmt.Errorf("invalid sample: non-unique label names: %q", dupLabel) + } + + metricName := ls.Get(model.MetricNameLabel) + if metricName == "" { + return 0, errMetricNameNotFound + } + + // The `up`, `target_info`, `otel_scope_info` metrics should never generate native histograms, + // thus we don't check for them here as opposed to the Append function. + + curMF, existing := t.getOrCreateMetricFamily(*rKey, getScopeID(ls), metricName) + if !existing { + curMF.mtype = pmetric.MetricTypeExponentialHistogram + } else if curMF.mtype != pmetric.MetricTypeExponentialHistogram { + // Already scraped as classic histogram. 
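+		// A float (classic) sample for this family was appended first, so the
+		// native sample is skipped here to avoid double-counting the series.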
+		return 0, nil
+	}
+
+	if h != nil && h.CounterResetHint == histogram.GaugeType || fh != nil && fh.CounterResetHint == histogram.GaugeType {
+		// Note: the gauge histogram is only warned about here; the sample is
+		// still appended below.
+		t.logger.Warn("dropping unsupported gauge histogram datapoint", zap.String("metric_name", metricName), zap.Any("labels", ls))
+	}
+
+	err = curMF.addExponentialHistogramSeries(t.getSeriesRef(ls, curMF.mtype), metricName, ls, atMs, h, fh)
+	if err != nil {
+		t.logger.Warn("failed to add histogram datapoint", zap.Error(err), zap.String("metric_name", metricName), zap.Any("labels", ls))
+	}
+
+	return 0, nil // never return errors, as that fails the whole scrape
+}
+
+func (t *transaction) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) (storage.SeriesRef, error) {
+	// TODO: implement this func
+	return 0, nil
+}
+
+func (t *transaction) getSeriesRef(ls labels.Labels, mtype pmetric.MetricType) uint64 {
+	var hash uint64
+	hash, t.bufBytes = getSeriesRef(t.bufBytes, ls, mtype)
+	return hash
+}
+
+// getMetrics builds a pmetric.Metrics out of everything accumulated in this transaction.
+// The only error returned by this function is errNoDataToBuild.
+// TODO: USE THIS TO CONVERT PROM TO OTEL
+func (t *transaction) getMetrics() (pmetric.Metrics, error) {
+	if len(t.families) == 0 {
+		return pmetric.Metrics{}, errNoDataToBuild
+	}
+
+	md := pmetric.NewMetrics()
+
+	for rKey, families := range t.families {
+		if len(families) == 0 {
+			continue
+		}
+		resource, ok := t.nodeResources[rKey]
+		if !ok {
+			continue
+		}
+		rms := md.ResourceMetrics().AppendEmpty()
+		resource.CopyTo(rms.Resource())
+
+		for scope, mfs := range families {
+			ils := rms.ScopeMetrics().AppendEmpty()
+			// If metrics don't include otel_scope_name or otel_scope_version
+			// labels, use the receiver name and version.
+			if scope == emptyScopeID {
+				ils.Scope().SetName(mdata.ScopeName)
+				ils.Scope().SetVersion(t.buildInfo.Version)
+			} else {
+				// Otherwise, use the scope that was provided with the metrics.
+				ils.Scope().SetName(scope.name)
+				ils.Scope().SetVersion(scope.version)
+				// If we got an otel_scope_info metric for that scope, get scope
+				// attributes from it.
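+				// For example (illustrative exposition line):
+				//   otel_scope_info{otel_scope_name="my.lib",otel_scope_version="1.2.0",fruit="apple"} 1
+				// records fruit="apple" as an attribute of scope ("my.lib", "1.2.0").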
+ if scopeAttributes, ok := t.scopeAttributes[rKey]; ok { + if attributes, ok := scopeAttributes[scope]; ok { + attributes.CopyTo(ils.Scope().Attributes()) + } + } + } + metrics := ils.Metrics() + for _, mf := range mfs { + mf.appendMetric(metrics, t.trimSuffixes) + } + } + } + // remove the resource if no metrics were added to avoid returning resources with empty data points + md.ResourceMetrics().RemoveIf(func(metrics pmetric.ResourceMetrics) bool { + if metrics.ScopeMetrics().Len() == 0 { + return true + } + remove := true + for i := 0; i < metrics.ScopeMetrics().Len(); i++ { + if metrics.ScopeMetrics().At(i).Metrics().Len() > 0 { + remove = false + break + } + } + return remove + }) + + return md, nil +} + +func getScopeID(ls labels.Labels) scopeID { + var scope scopeID + ls.Range(func(lbl labels.Label) { + if lbl.Name == prometheus.ScopeNameLabelKey { + scope.name = lbl.Value + } + if lbl.Name == prometheus.ScopeVersionLabelKey { + scope.version = lbl.Value + } + }) + return scope +} + +func (t *transaction) initTransaction(labels labels.Labels) (*resourceKey, error) { + target, ok := scrape.TargetFromContext(t.ctx) + if !ok { + return nil, errors.New("unable to find target in context") + } + t.mc, ok = scrape.MetricMetadataStoreFromContext(t.ctx) + if !ok { + return nil, errors.New("unable to find MetricMetadataStore in context") + } + + rKey, err := t.getJobAndInstance(labels) + if err != nil { + return nil, err + } + if _, ok := t.nodeResources[*rKey]; !ok { + t.nodeResources[*rKey] = CreateResource(rKey.job, rKey.instance, target.DiscoveredLabels()) + } + + t.isNew = false + return rKey, nil +} + +func (t *transaction) getJobAndInstance(labels labels.Labels) (*resourceKey, error) { + // first, try to get job and instance from the labels + job, instance := labels.Get(model.JobLabel), labels.Get(model.InstanceLabel) + if job != "" && instance != "" { + return &resourceKey{ + job: job, + instance: instance, + }, nil + } + + // if not available in the labels, try to fall back to the scrape job associated + // with the transaction. + // this can be the case for, e.g., aggregated metrics coming from a federate endpoint + // that represent the whole cluster, rather than an individual workload. 
+ // See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/32555 for reference + if target, ok := scrape.TargetFromContext(t.ctx); ok { + if job == "" { + job = target.GetValue(model.JobLabel) + } + if instance == "" { + instance = target.GetValue(model.InstanceLabel) + } + if job != "" && instance != "" { + return &resourceKey{ + job: job, + instance: instance, + }, nil + } + } + return nil, errNoJobInstance +} + +func (t *transaction) Commit() error { + if t.isNew { + return nil + } + + ctx := t.obsrecv.StartMetricsOp(t.ctx) + md, err := t.getMetrics() + if err != nil { + t.obsrecv.EndMetricsOp(ctx, dataformat, 0, err) + return err + } + + numPoints := md.DataPointCount() + if numPoints == 0 { + return nil + } + + if err = t.metricAdjuster.AdjustMetrics(md); err != nil { + t.obsrecv.EndMetricsOp(ctx, dataformat, numPoints, err) + return err + } + + err = t.sink.ConsumeMetrics(ctx, md) + t.obsrecv.EndMetricsOp(ctx, dataformat, numPoints, err) + return err +} + +func (t *transaction) Rollback() error { + return nil +} + +func (t *transaction) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) { + // TODO: implement this func + return 0, nil +} + +func (t *transaction) AddTargetInfo(key resourceKey, ls labels.Labels) { + if resource, ok := t.nodeResources[key]; ok { + attrs := resource.Attributes() + ls.Range(func(lbl labels.Label) { + if lbl.Name == model.JobLabel || lbl.Name == model.InstanceLabel || lbl.Name == model.MetricNameLabel { + return + } + attrs.PutStr(lbl.Name, lbl.Value) + }) + } +} + +func (t *transaction) addScopeInfo(key resourceKey, ls labels.Labels) { + attrs := pcommon.NewMap() + scope := scopeID{} + ls.Range(func(lbl labels.Label) { + if lbl.Name == model.JobLabel || lbl.Name == model.InstanceLabel || lbl.Name == model.MetricNameLabel { + return + } + if lbl.Name == prometheus.ScopeNameLabelKey { + scope.name = lbl.Value + return + } + if lbl.Name == prometheus.ScopeVersionLabelKey { + scope.version = lbl.Value + return + } + attrs.PutStr(lbl.Name, lbl.Value) + }) + if _, ok := t.scopeAttributes[key]; !ok { + t.scopeAttributes[key] = make(map[scopeID]pcommon.Map) + } + t.scopeAttributes[key][scope] = attrs +} + +func getSeriesRef(bytes []byte, ls labels.Labels, mtype pmetric.MetricType) (uint64, []byte) { + return ls.HashWithoutLabels(bytes, getSortedNotUsefulLabels(mtype)...) 
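+	// Note: labels.HashWithoutLabels always skips __name__ in addition to the
+	// provided names, so suffixed series of one family (e.g. _bucket/_sum/_count)
+	// that differ only in type-specific labels (such as "le" for histograms)
+	// hash to the same series ref.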
+} diff --git a/pkg/promotel/internal/prometheusreceiver/internal/transaction_test.go b/pkg/promotel/internal/prometheusreceiver/internal/transaction_test.go new file mode 100644 index 0000000000..5910dde968 --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/internal/transaction_test.go @@ -0,0 +1,2003 @@ +package internal + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/tsdb/tsdbutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/receiver/receiverhelper" + "go.opentelemetry.io/collector/receiver/receivertest" + conventions "go.opentelemetry.io/collector/semconv/v1.27.0" + "go.uber.org/zap" + "go.uber.org/zap/zaptest/observer" + + "github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheus/scrape" +) + +const ( + startTimestamp = pcommon.Timestamp(1555366608340000000) + ts = int64(1555366610000) + interval = int64(15 * 1000) + tsNanos = pcommon.Timestamp(ts * 1e6) + tsPlusIntervalNanos = pcommon.Timestamp((ts + interval) * 1e6) +) + +var ( + target = scrape.NewTarget( + // processedLabels contain label values after processing (e.g. relabeling) + labels.FromMap(map[string]string{ + model.InstanceLabel: "localhost:8080", + }), + // discoveredLabels contain labels prior to any processing + labels.FromMap(map[string]string{ + model.AddressLabel: "address:8080", + model.SchemeLabel: "http", + }), + nil) + + scrapeCtx = scrape.ContextWithMetricMetadataStore( + scrape.ContextWithTarget(context.Background(), target), + testMetadataStore(testMetadata)) +) + +func TestTransactionCommitWithoutAdding(t *testing.T) { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("enableNativeHistograms=%v", enableNativeHistograms), func(t *testing.T) { + testTransactionCommitWithoutAdding(t, enableNativeHistograms) + }) + } +} + +func testTransactionCommitWithoutAdding(t *testing.T, enableNativeHistograms bool) { + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), labels.EmptyLabels(), receivertest.NewNopSettings(), nopObsRecv(t), false, enableNativeHistograms) + require.NoError(t, tr.Commit()) +} + +func TestTransactionRollbackDoesNothing(t *testing.T) { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("enableNativeHistograms=%v", enableNativeHistograms), func(t *testing.T) { + testTransactionRollbackDoesNothing(t, enableNativeHistograms) + }) + } +} + +func testTransactionRollbackDoesNothing(t *testing.T, enableNativeHistograms bool) { + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), labels.EmptyLabels(), receivertest.NewNopSettings(), nopObsRecv(t), false, enableNativeHistograms) + require.NoError(t, tr.Rollback()) +} + +func TestTransactionUpdateMetadataDoesNothing(t *testing.T) { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("enableNativeHistograms=%v", enableNativeHistograms), func(t *testing.T) { + 
testTransactionUpdateMetadataDoesNothing(t, enableNativeHistograms) + }) + } +} + +func testTransactionUpdateMetadataDoesNothing(t *testing.T, enableNativeHistograms bool) { + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), labels.EmptyLabels(), receivertest.NewNopSettings(), nopObsRecv(t), false, enableNativeHistograms) + _, err := tr.UpdateMetadata(0, labels.New(), metadata.Metadata{}) + require.NoError(t, err) +} + +func TestTransactionAppendNoTarget(t *testing.T) { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("enableNativeHistograms=%v", enableNativeHistograms), func(t *testing.T) { + testTransactionAppendNoTarget(t, enableNativeHistograms) + }) + } +} + +func testTransactionAppendNoTarget(t *testing.T, enableNativeHistograms bool) { + badLabels := labels.FromStrings(model.MetricNameLabel, "counter_test") + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), labels.EmptyLabels(), receivertest.NewNopSettings(), nopObsRecv(t), false, enableNativeHistograms) + _, err := tr.Append(0, badLabels, time.Now().Unix()*1000, 1.0) + assert.Error(t, err) +} + +func TestTransactionAppendNoMetricName(t *testing.T) { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("enableNativeHistograms=%v", enableNativeHistograms), func(t *testing.T) { + testTransactionAppendNoMetricName(t, enableNativeHistograms) + }) + } +} + +func testTransactionAppendNoMetricName(t *testing.T, enableNativeHistograms bool) { + jobNotFoundLb := labels.FromMap(map[string]string{ + model.InstanceLabel: "localhost:8080", + model.JobLabel: "test2", + }) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), labels.EmptyLabels(), receivertest.NewNopSettings(), nopObsRecv(t), false, enableNativeHistograms) + _, err := tr.Append(0, jobNotFoundLb, time.Now().Unix()*1000, 1.0) + require.ErrorIs(t, err, errMetricNameNotFound) + require.ErrorIs(t, tr.Commit(), errNoDataToBuild) +} + +func TestTransactionAppendEmptyMetricName(t *testing.T) { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("enableNativeHistograms=%v", enableNativeHistograms), func(t *testing.T) { + testTransactionAppendEmptyMetricName(t, enableNativeHistograms) + }) + } +} + +func testTransactionAppendEmptyMetricName(t *testing.T, enableNativeHistograms bool) { + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), labels.EmptyLabels(), receivertest.NewNopSettings(), nopObsRecv(t), false, enableNativeHistograms) + _, err := tr.Append(0, labels.FromMap(map[string]string{ + model.InstanceLabel: "localhost:8080", + model.JobLabel: "test2", + model.MetricNameLabel: "", + }), time.Now().Unix()*1000, 1.0) + assert.ErrorIs(t, err, errMetricNameNotFound) +} + +func TestTransactionAppendResource(t *testing.T) { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("enableNativeHistograms=%v", enableNativeHistograms), func(t *testing.T) { + testTransactionAppendResource(t, enableNativeHistograms) + }) + } +} + +func testTransactionAppendResource(t *testing.T, enableNativeHistograms bool) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, labels.EmptyLabels(), receivertest.NewNopSettings(), nopObsRecv(t), false, enableNativeHistograms) + _, err := tr.Append(0, 
labels.FromMap(map[string]string{ + model.InstanceLabel: "localhost:8080", + model.JobLabel: "test", + model.MetricNameLabel: "counter_test", + }), time.Now().Unix()*1000, 1.0) + require.NoError(t, err) + _, err = tr.Append(0, labels.FromMap(map[string]string{ + model.InstanceLabel: "localhost:8080", + model.JobLabel: "test", + model.MetricNameLabel: startTimeMetricName, + }), time.Now().UnixMilli(), 1.0) + require.NoError(t, err) + require.NoError(t, tr.Commit()) + expectedResource := CreateResource("test", "localhost:8080", labels.FromStrings(model.SchemeLabel, "http")) + mds := sink.AllMetrics() + require.Len(t, mds, 1) + gotResource := mds[0].ResourceMetrics().At(0).Resource() + require.Equal(t, expectedResource, gotResource) +} + +func TestTransactionAppendMultipleResources(t *testing.T) { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("enableNativeHistograms=%v", enableNativeHistograms), func(t *testing.T) { + testTransactionAppendMultipleResources(t, enableNativeHistograms) + }) + } +} + +func testTransactionAppendMultipleResources(t *testing.T, enableNativeHistograms bool) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, labels.EmptyLabels(), receivertest.NewNopSettings(), nopObsRecv(t), false, enableNativeHistograms) + _, err := tr.Append(0, labels.FromMap(map[string]string{ + model.InstanceLabel: "localhost:8080", + model.JobLabel: "test-1", + model.MetricNameLabel: "counter_test", + }), time.Now().Unix()*1000, 1.0) + require.NoError(t, err) + _, err = tr.Append(0, labels.FromMap(map[string]string{ + model.InstanceLabel: "localhost:8080", + model.JobLabel: "test-2", + model.MetricNameLabel: startTimeMetricName, + }), time.Now().UnixMilli(), 1.0) + require.NoError(t, err) + require.NoError(t, tr.Commit()) + + expectedResources := []pcommon.Resource{ + CreateResource("test-1", "localhost:8080", labels.FromStrings(model.SchemeLabel, "http")), + CreateResource("test-2", "localhost:8080", labels.FromStrings(model.SchemeLabel, "http")), + } + + mds := sink.AllMetrics() + require.Len(t, mds, 1) + require.Equal(t, 2, mds[0].ResourceMetrics().Len()) + + for _, expectedResource := range expectedResources { + foundResource := false + expectedServiceName, _ := expectedResource.Attributes().Get(conventions.AttributeServiceName) + for i := 0; i < mds[0].ResourceMetrics().Len(); i++ { + res := mds[0].ResourceMetrics().At(i).Resource() + if serviceName, ok := res.Attributes().Get(conventions.AttributeServiceName); ok { + if serviceName.AsString() == expectedServiceName.AsString() { + foundResource = true + require.Equal(t, expectedResource, res) + break + } + } + } + require.True(t, foundResource) + } +} + +func TestReceiverVersionAndNameAreAttached(t *testing.T) { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("enableNativeHistograms=%v", enableNativeHistograms), func(t *testing.T) { + testReceiverVersionAndNameAreAttached(t, enableNativeHistograms) + }) + } +} + +func testReceiverVersionAndNameAreAttached(t *testing.T, enableNativeHistograms bool) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, labels.EmptyLabels(), receivertest.NewNopSettings(), nopObsRecv(t), false, enableNativeHistograms) + _, err := tr.Append(0, labels.FromMap(map[string]string{ + model.InstanceLabel: "localhost:8080", + model.JobLabel: "test", + model.MetricNameLabel: "counter_test", + }), 
time.Now().Unix()*1000, 1.0) + require.NoError(t, err) + require.NoError(t, tr.Commit()) + + expectedResource := CreateResource("test", "localhost:8080", labels.FromStrings(model.SchemeLabel, "http")) + mds := sink.AllMetrics() + require.Len(t, mds, 1) + gotResource := mds[0].ResourceMetrics().At(0).Resource() + require.Equal(t, expectedResource, gotResource) + + gotScope := mds[0].ResourceMetrics().At(0).ScopeMetrics().At(0).Scope() + require.Contains(t, "github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheusreceiver", gotScope.Name()) + require.Equal(t, component.NewDefaultBuildInfo().Version, gotScope.Version()) +} + +func TestTransactionCommitErrorWhenAdjusterError(t *testing.T) { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("enableNativeHistograms=%v", enableNativeHistograms), func(t *testing.T) { + testTransactionCommitErrorWhenAdjusterError(t, enableNativeHistograms) + }) + } +} + +func testTransactionCommitErrorWhenAdjusterError(t *testing.T, enableNativeHistograms bool) { + goodLabels := labels.FromMap(map[string]string{ + model.InstanceLabel: "localhost:8080", + model.JobLabel: "test", + model.MetricNameLabel: "counter_test", + }) + sink := new(consumertest.MetricsSink) + adjusterErr := errors.New("adjuster error") + tr := newTransaction(scrapeCtx, &errorAdjuster{err: adjusterErr}, sink, labels.EmptyLabels(), receivertest.NewNopSettings(), nopObsRecv(t), false, enableNativeHistograms) + _, err := tr.Append(0, goodLabels, time.Now().Unix()*1000, 1.0) + require.NoError(t, err) + assert.ErrorIs(t, tr.Commit(), adjusterErr) +} + +// Ensure that we reject duplicate label keys. See https://github.com/open-telemetry/wg-prometheus/issues/44. +func TestTransactionAppendDuplicateLabels(t *testing.T) { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("enableNativeHistograms=%v", enableNativeHistograms), func(t *testing.T) { + testTransactionAppendDuplicateLabels(t, enableNativeHistograms) + }) + } +} + +func testTransactionAppendDuplicateLabels(t *testing.T, enableNativeHistograms bool) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, labels.EmptyLabels(), receivertest.NewNopSettings(), nopObsRecv(t), false, enableNativeHistograms) + + dupLabels := labels.FromStrings( + model.InstanceLabel, "0.0.0.0:8855", + model.JobLabel, "test", + model.MetricNameLabel, "counter_test", + "a", "1", + "a", "6", + "z", "9", + ) + + _, err := tr.Append(0, dupLabels, 1917, 1.0) + assert.ErrorContains(t, err, `invalid sample: non-unique label names: "a"`) +} + +func TestTransactionAppendHistogramNoLe(t *testing.T) { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("enableNativeHistograms=%v", enableNativeHistograms), func(t *testing.T) { + testTransactionAppendHistogramNoLe(t, enableNativeHistograms) + }) + } +} + +func testTransactionAppendHistogramNoLe(t *testing.T, enableNativeHistograms bool) { + sink := new(consumertest.MetricsSink) + receiverSettings := receivertest.NewNopSettings() + core, observedLogs := observer.New(zap.InfoLevel) + receiverSettings.Logger = zap.New(core) + tr := newTransaction( + scrapeCtx, + &startTimeAdjuster{startTime: startTimestamp}, + sink, + labels.EmptyLabels(), + receiverSettings, + nopObsRecv(t), + false, + enableNativeHistograms, + ) + + goodLabels := labels.FromStrings( + model.InstanceLabel, "0.0.0.0:8855", + model.JobLabel, "test", + model.MetricNameLabel, 
"hist_test_bucket", + ) + + _, err := tr.Append(0, goodLabels, 1917, 1.0) + require.NoError(t, err) + assert.Equal(t, 1, observedLogs.Len()) + assert.Equal(t, 1, observedLogs.FilterMessage("failed to add datapoint").Len()) + + require.NoError(t, tr.Commit()) + assert.Empty(t, sink.AllMetrics()) +} + +func TestTransactionAppendSummaryNoQuantile(t *testing.T) { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("enableNativeHistograms=%v", enableNativeHistograms), func(t *testing.T) { + testTransactionAppendSummaryNoQuantile(t, enableNativeHistograms) + }) + } +} + +func testTransactionAppendSummaryNoQuantile(t *testing.T, enableNativeHistograms bool) { + sink := new(consumertest.MetricsSink) + receiverSettings := receivertest.NewNopSettings() + core, observedLogs := observer.New(zap.InfoLevel) + receiverSettings.Logger = zap.New(core) + tr := newTransaction( + scrapeCtx, + &startTimeAdjuster{startTime: startTimestamp}, + sink, + labels.EmptyLabels(), + receiverSettings, + nopObsRecv(t), + false, + enableNativeHistograms, + ) + + goodLabels := labels.FromStrings( + model.InstanceLabel, "0.0.0.0:8855", + model.JobLabel, "test", + model.MetricNameLabel, "summary_test", + ) + + _, err := tr.Append(0, goodLabels, 1917, 1.0) + require.NoError(t, err) + assert.Equal(t, 1, observedLogs.Len()) + assert.Equal(t, 1, observedLogs.FilterMessage("failed to add datapoint").Len()) + + require.NoError(t, tr.Commit()) + assert.Empty(t, sink.AllMetrics()) +} + +func TestTransactionAppendValidAndInvalid(t *testing.T) { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("enableNativeHistograms=%v", enableNativeHistograms), func(t *testing.T) { + testTransactionAppendValidAndInvalid(t, enableNativeHistograms) + }) + } +} + +func testTransactionAppendValidAndInvalid(t *testing.T, enableNativeHistograms bool) { + sink := new(consumertest.MetricsSink) + receiverSettings := receivertest.NewNopSettings() + core, observedLogs := observer.New(zap.InfoLevel) + receiverSettings.Logger = zap.New(core) + tr := newTransaction( + scrapeCtx, + &startTimeAdjuster{startTime: startTimestamp}, + sink, + labels.EmptyLabels(), + receiverSettings, + nopObsRecv(t), + false, + enableNativeHistograms, + ) + + // a valid counter + _, err := tr.Append(0, labels.FromMap(map[string]string{ + model.InstanceLabel: "localhost:8080", + model.JobLabel: "test", + model.MetricNameLabel: "counter_test", + }), time.Now().Unix()*1000, 1.0) + require.NoError(t, err) + + // summary without quantiles, should be ignored + summarylabels := labels.FromStrings( + model.InstanceLabel, "0.0.0.0:8855", + model.JobLabel, "test", + model.MetricNameLabel, "summary_test", + ) + + _, err = tr.Append(0, summarylabels, 1917, 1.0) + require.NoError(t, err) + + assert.Equal(t, 1, observedLogs.Len()) + assert.Equal(t, 1, observedLogs.FilterMessage("failed to add datapoint").Len()) + + require.NoError(t, tr.Commit()) + expectedResource := CreateResource("test", "localhost:8080", labels.FromStrings(model.SchemeLabel, "http")) + mds := sink.AllMetrics() + require.Len(t, mds, 1) + gotResource := mds[0].ResourceMetrics().At(0).Resource() + require.Equal(t, expectedResource, gotResource) + require.Equal(t, 1, mds[0].MetricCount()) +} + +func TestTransactionAppendWithEmptyLabelArrayFallbackToTargetLabels(t *testing.T) { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("enableNativeHistograms=%v", enableNativeHistograms), func(t *testing.T) { + 
testTransactionAppendWithEmptyLabelArrayFallbackToTargetLabels(t, enableNativeHistograms) + }) + } +} + +func testTransactionAppendWithEmptyLabelArrayFallbackToTargetLabels(t *testing.T, enableNativeHistograms bool) { + sink := new(consumertest.MetricsSink) + + scrapeTarget := scrape.NewTarget( + // processedLabels contain label values after processing (e.g. relabeling) + labels.FromMap(map[string]string{ + model.InstanceLabel: "localhost:8080", + model.JobLabel: "federate", + }), + // discoveredLabels contain labels prior to any processing + labels.FromMap(map[string]string{ + model.AddressLabel: "address:8080", + model.SchemeLabel: "http", + }), + nil) + + ctx := scrape.ContextWithMetricMetadataStore( + scrape.ContextWithTarget(context.Background(), scrapeTarget), + testMetadataStore(testMetadata)) + + tr := newTransaction(ctx, &startTimeAdjuster{startTime: startTimestamp}, sink, labels.EmptyLabels(), receivertest.NewNopSettings(), nopObsRecv(t), false, enableNativeHistograms) + + _, err := tr.Append(0, labels.FromMap(map[string]string{ + model.MetricNameLabel: "counter_test", + }), time.Now().Unix()*1000, 1.0) + require.NoError(t, err) +} + +func TestAppendExemplarWithNoMetricName(t *testing.T) { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("enableNativeHistograms=%v", enableNativeHistograms), func(t *testing.T) { + testAppendExemplarWithNoMetricName(t, enableNativeHistograms) + }) + } +} + +func testAppendExemplarWithNoMetricName(t *testing.T, enableNativeHistograms bool) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, labels.EmptyLabels(), receivertest.NewNopSettings(), nopObsRecv(t), false, enableNativeHistograms) + + labels := labels.FromStrings( + model.InstanceLabel, "0.0.0.0:8855", + model.JobLabel, "test", + ) + + _, err := tr.AppendExemplar(0, labels, exemplar.Exemplar{Value: 0}) + assert.Equal(t, errMetricNameNotFound, err) +} + +func TestAppendExemplarWithEmptyMetricName(t *testing.T) { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("enableNativeHistograms=%v", enableNativeHistograms), func(t *testing.T) { + testAppendExemplarWithEmptyMetricName(t, enableNativeHistograms) + }) + } +} + +func testAppendExemplarWithEmptyMetricName(t *testing.T, enableNativeHistograms bool) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, labels.EmptyLabels(), receivertest.NewNopSettings(), nopObsRecv(t), false, enableNativeHistograms) + + labels := labels.FromStrings( + model.InstanceLabel, "0.0.0.0:8855", + model.JobLabel, "test", + model.MetricNameLabel, "", + ) + _, err := tr.AppendExemplar(0, labels, exemplar.Exemplar{Value: 0}) + assert.Equal(t, errMetricNameNotFound, err) +} + +func TestAppendExemplarWithDuplicateLabels(t *testing.T) { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("enableNativeHistograms=%v", enableNativeHistograms), func(t *testing.T) { + testAppendExemplarWithDuplicateLabels(t, enableNativeHistograms) + }) + } +} + +func testAppendExemplarWithDuplicateLabels(t *testing.T, enableNativeHistograms bool) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, labels.EmptyLabels(), receivertest.NewNopSettings(), nopObsRecv(t), false, enableNativeHistograms) + + labels := labels.FromStrings( + model.InstanceLabel, "0.0.0.0:8855", + 
model.JobLabel, "test", + model.MetricNameLabel, "", + "a", "b", + "a", "c", + ) + _, err := tr.AppendExemplar(0, labels, exemplar.Exemplar{Value: 0}) + assert.ErrorContains(t, err, `invalid sample: non-unique label names: "a"`) +} + +func TestAppendExemplarWithoutAddingMetric(t *testing.T) { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("enableNativeHistograms=%v", enableNativeHistograms), func(t *testing.T) { + testAppendExemplarWithoutAddingMetric(t, enableNativeHistograms) + }) + } +} + +func testAppendExemplarWithoutAddingMetric(t *testing.T, enableNativeHistograms bool) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, labels.EmptyLabels(), receivertest.NewNopSettings(), nopObsRecv(t), false, enableNativeHistograms) + + labels := labels.FromStrings( + model.InstanceLabel, "0.0.0.0:8855", + model.JobLabel, "test", + model.MetricNameLabel, "counter_test", + "a", "b", + ) + _, err := tr.AppendExemplar(0, labels, exemplar.Exemplar{Value: 0}) + require.NoError(t, err) +} + +func TestAppendExemplarWithNoLabels(t *testing.T) { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("enableNativeHistograms=%v", enableNativeHistograms), func(t *testing.T) { + testAppendExemplarWithNoLabels(t, enableNativeHistograms) + }) + } +} + +func testAppendExemplarWithNoLabels(t *testing.T, enableNativeHistograms bool) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, labels.EmptyLabels(), receivertest.NewNopSettings(), nopObsRecv(t), false, enableNativeHistograms) + + _, err := tr.AppendExemplar(0, labels.EmptyLabels(), exemplar.Exemplar{Value: 0}) + assert.Equal(t, errNoJobInstance, err) +} + +func TestAppendExemplarWithEmptyLabelArray(t *testing.T) { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("enableNativeHistograms=%v", enableNativeHistograms), func(t *testing.T) { + testAppendExemplarWithEmptyLabelArray(t, enableNativeHistograms) + }) + } +} + +func testAppendExemplarWithEmptyLabelArray(t *testing.T, enableNativeHistograms bool) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, labels.EmptyLabels(), receivertest.NewNopSettings(), nopObsRecv(t), false, enableNativeHistograms) + + _, err := tr.AppendExemplar(0, labels.FromStrings(), exemplar.Exemplar{Value: 0}) + assert.Equal(t, errNoJobInstance, err) +} + +func nopObsRecv(t *testing.T) *receiverhelper.ObsReport { + obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ + ReceiverID: component.MustNewID("prometheus"), + Transport: transport, + ReceiverCreateSettings: receivertest.NewNopSettings(), + }) + require.NoError(t, err) + return obsrecv +} + +func TestMetricBuilderCounters(t *testing.T) { + tests := []buildTestData{ + { + name: "single-item", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("counter_test", 100, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("counter_test") + m0.Metadata().PutStr("prometheus.type", "counter") + sum := m0.SetEmptySum() + sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + sum.SetIsMonotonic(true) + pt0 := 
sum.DataPoints().AppendEmpty() + pt0.SetDoubleValue(100.0) + pt0.SetStartTimestamp(startTimestamp) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "single-item-with-exemplars", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint( + "counter_test", + 100, + []exemplar.Exemplar{ + { + Value: 1, + Ts: 1663113420863, + Labels: labels.New([]labels.Label{{Name: model.MetricNameLabel, Value: "counter_test"}, {Name: model.JobLabel, Value: "job"}, {Name: model.InstanceLabel, Value: "instance"}, {Name: "foo", Value: "bar"}}...), + }, + { + Value: 1, + Ts: 1663113420863, + Labels: labels.New([]labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: ""}, {Name: "span_id", Value: ""}}...), + }, + { + Value: 1, + Ts: 1663113420863, + Labels: labels.New([]labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: "10a47365b8aa04e08291fab9deca84db6170"}, {Name: "span_id", Value: "719cee4a669fd7d109ff"}}...), + }, + { + Value: 1, + Ts: 1663113420863, + Labels: labels.New([]labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: "174137cab66dc880"}, {Name: "span_id", Value: "dfa4597a9d"}}...), + }, + }, + "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("counter_test") + m0.Metadata().PutStr("prometheus.type", "counter") + sum := m0.SetEmptySum() + sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + sum.SetIsMonotonic(true) + pt0 := sum.DataPoints().AppendEmpty() + pt0.SetDoubleValue(100.0) + pt0.SetStartTimestamp(startTimestamp) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + + e0 := pt0.Exemplars().AppendEmpty() + e0.SetTimestamp(timestampFromMs(1663113420863)) + e0.SetDoubleValue(1) + e0.FilteredAttributes().PutStr(model.MetricNameLabel, "counter_test") + e0.FilteredAttributes().PutStr("foo", "bar") + e0.FilteredAttributes().PutStr(model.InstanceLabel, "instance") + e0.FilteredAttributes().PutStr(model.JobLabel, "job") + + e1 := pt0.Exemplars().AppendEmpty() + e1.SetTimestamp(timestampFromMs(1663113420863)) + e1.SetDoubleValue(1) + e1.FilteredAttributes().PutStr("foo", "bar") + + e2 := pt0.Exemplars().AppendEmpty() + e2.SetTimestamp(timestampFromMs(1663113420863)) + e2.SetDoubleValue(1) + e2.FilteredAttributes().PutStr("foo", "bar") + e2.SetTraceID([16]byte{0x10, 0xa4, 0x73, 0x65, 0xb8, 0xaa, 0x04, 0xe0, 0x82, 0x91, 0xfa, 0xb9, 0xde, 0xca, 0x84, 0xdb}) + e2.SetSpanID([8]byte{0x71, 0x9c, 0xee, 0x4a, 0x66, 0x9f, 0xd7, 0xd1}) + + e3 := pt0.Exemplars().AppendEmpty() + e3.SetTimestamp(timestampFromMs(1663113420863)) + e3.SetDoubleValue(1) + e3.FilteredAttributes().PutStr("foo", "bar") + e3.SetTraceID([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x41, 0x37, 0xca, 0xb6, 0x6d, 0xc8, 0x80}) + e3.SetSpanID([8]byte{0x00, 0x00, 0x00, 0xdf, 0xa4, 0x59, 0x7a, 0x9d}) + + return []pmetric.Metrics{md0} + }, + }, + { + name: "two-items", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("counter_test", 150, nil, "foo", "bar"), + createDataPoint("counter_test", 25, nil, "foo", "other"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("counter_test") + 
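// Prometheus counters become cumulative, monotonic OTLP Sums; both series of the counter_test family land as data points on one Sum. +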
m0.Metadata().PutStr("prometheus.type", "counter") + sum := m0.SetEmptySum() + sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + sum.SetIsMonotonic(true) + pt0 := sum.DataPoints().AppendEmpty() + pt0.SetDoubleValue(150.0) + pt0.SetStartTimestamp(startTimestamp) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + + pt1 := sum.DataPoints().AppendEmpty() + pt1.SetDoubleValue(25.0) + pt1.SetStartTimestamp(startTimestamp) + pt1.SetTimestamp(tsNanos) + pt1.Attributes().PutStr("foo", "other") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "two-metrics", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("counter_test", 150, nil, "foo", "bar"), + createDataPoint("counter_test", 25, nil, "foo", "other"), + createDataPoint("counter_test2", 100, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("counter_test") + m0.Metadata().PutStr("prometheus.type", "counter") + sum0 := m0.SetEmptySum() + sum0.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + sum0.SetIsMonotonic(true) + pt0 := sum0.DataPoints().AppendEmpty() + pt0.SetDoubleValue(150.0) + pt0.SetStartTimestamp(startTimestamp) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + + pt1 := sum0.DataPoints().AppendEmpty() + pt1.SetDoubleValue(25.0) + pt1.SetStartTimestamp(startTimestamp) + pt1.SetTimestamp(tsNanos) + pt1.Attributes().PutStr("foo", "other") + + m1 := mL0.AppendEmpty() + m1.SetName("counter_test2") + m1.Metadata().PutStr("prometheus.type", "counter") + sum1 := m1.SetEmptySum() + sum1.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + sum1.SetIsMonotonic(true) + pt2 := sum1.DataPoints().AppendEmpty() + pt2.SetDoubleValue(100.0) + pt2.SetStartTimestamp(startTimestamp) + pt2.SetTimestamp(tsNanos) + pt2.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "metrics-with-poor-names", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("poor_name_count", 100, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("poor_name_count") + m0.Metadata().PutStr("prometheus.type", "counter") + sum := m0.SetEmptySum() + sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + sum.SetIsMonotonic(true) + pt0 := sum.DataPoints().AppendEmpty() + pt0.SetDoubleValue(100.0) + pt0.SetStartTimestamp(startTimestamp) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + } + + for _, tt := range tests { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("%s/enableNativeHistograms=%v", tt.name, enableNativeHistograms), func(t *testing.T) { + tt.run(t, enableNativeHistograms) + }) + } + } +} + +func TestMetricBuilderGauges(t *testing.T) { + tests := []buildTestData{ + { + name: "one-gauge", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("gauge_test", 100, nil, "foo", "bar"), + }, + }, + { + pts: []*testDataPoint{ + createDataPoint("gauge_test", 90, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := 
md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("gauge_test") + m0.Metadata().PutStr("prometheus.type", "gauge") + gauge0 := m0.SetEmptyGauge() + pt0 := gauge0.DataPoints().AppendEmpty() + pt0.SetDoubleValue(100.0) + pt0.SetStartTimestamp(0) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + + md1 := pmetric.NewMetrics() + mL1 := md1.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m1 := mL1.AppendEmpty() + m1.SetName("gauge_test") + m1.Metadata().PutStr("prometheus.type", "gauge") + gauge1 := m1.SetEmptyGauge() + pt1 := gauge1.DataPoints().AppendEmpty() + pt1.SetDoubleValue(90.0) + pt1.SetStartTimestamp(0) + pt1.SetTimestamp(tsPlusIntervalNanos) + pt1.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0, md1} + }, + }, + { + name: "one-gauge-with-exemplars", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint( + "gauge_test", + 100, + []exemplar.Exemplar{ + { + Value: 2, + Ts: 1663350815890, + Labels: labels.New([]labels.Label{{Name: model.MetricNameLabel, Value: "counter_test"}, {Name: model.JobLabel, Value: "job"}, {Name: model.InstanceLabel, Value: "instance"}, {Name: "foo", Value: "bar"}}...), + }, + { + Value: 2, + Ts: 1663350815890, + Labels: labels.New([]labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: ""}, {Name: "span_id", Value: ""}}...), + }, + { + Value: 2, + Ts: 1663350815890, + Labels: labels.New([]labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: "10a47365b8aa04e08291fab9deca84db6170"}, {Name: "span_id", Value: "719cee4a669fd7d109ff"}}...), + }, + { + Value: 2, + Ts: 1663350815890, + Labels: labels.New([]labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: "174137cab66dc880"}, {Name: "span_id", Value: "dfa4597a9d"}}...), + }, + }, + "foo", "bar"), + }, + }, + { + pts: []*testDataPoint{ + createDataPoint("gauge_test", 90, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("gauge_test") + m0.Metadata().PutStr("prometheus.type", "gauge") + gauge0 := m0.SetEmptyGauge() + pt0 := gauge0.DataPoints().AppendEmpty() + pt0.SetDoubleValue(100.0) + pt0.SetStartTimestamp(0) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + + e0 := pt0.Exemplars().AppendEmpty() + e0.SetTimestamp(timestampFromMs(1663350815890)) + e0.SetDoubleValue(2) + e0.FilteredAttributes().PutStr(model.MetricNameLabel, "counter_test") + e0.FilteredAttributes().PutStr("foo", "bar") + e0.FilteredAttributes().PutStr(model.InstanceLabel, "instance") + e0.FilteredAttributes().PutStr(model.JobLabel, "job") + + e1 := pt0.Exemplars().AppendEmpty() + e1.SetTimestamp(timestampFromMs(1663350815890)) + e1.SetDoubleValue(2) + e1.FilteredAttributes().PutStr("foo", "bar") + + e2 := pt0.Exemplars().AppendEmpty() + e2.SetTimestamp(timestampFromMs(1663350815890)) + e2.SetDoubleValue(2) + e2.FilteredAttributes().PutStr("foo", "bar") + e2.SetTraceID([16]byte{0x10, 0xa4, 0x73, 0x65, 0xb8, 0xaa, 0x04, 0xe0, 0x82, 0x91, 0xfa, 0xb9, 0xde, 0xca, 0x84, 0xdb}) + e2.SetSpanID([8]byte{0x71, 0x9c, 0xee, 0x4a, 0x66, 0x9f, 0xd7, 0xd1}) + + e3 := pt0.Exemplars().AppendEmpty() + e3.SetTimestamp(timestampFromMs(1663350815890)) + e3.SetDoubleValue(2) + e3.FilteredAttributes().PutStr("foo", "bar") + e3.SetTraceID([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x17, 0x41, 0x37, 0xca, 0xb6, 0x6d, 0xc8, 0x80}) + e3.SetSpanID([8]byte{0x00, 0x00, 0x00, 0xdf, 0xa4, 0x59, 0x7a, 0x9d}) + + md1 := pmetric.NewMetrics() + mL1 := md1.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m1 := mL1.AppendEmpty() + m1.SetName("gauge_test") + m1.Metadata().PutStr("prometheus.type", "gauge") + gauge1 := m1.SetEmptyGauge() + pt1 := gauge1.DataPoints().AppendEmpty() + pt1.SetDoubleValue(90.0) + pt1.SetStartTimestamp(0) + pt1.SetTimestamp(tsPlusIntervalNanos) + pt1.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0, md1} + }, + }, + { + name: "gauge-with-different-tags", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("gauge_test", 100, nil, "foo", "bar"), + createDataPoint("gauge_test", 200, nil, "bar", "foo"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("gauge_test") + m0.Metadata().PutStr("prometheus.type", "gauge") + gauge0 := m0.SetEmptyGauge() + pt0 := gauge0.DataPoints().AppendEmpty() + pt0.SetDoubleValue(100.0) + pt0.SetStartTimestamp(0) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + + pt1 := gauge0.DataPoints().AppendEmpty() + pt1.SetDoubleValue(200.0) + pt1.SetStartTimestamp(0) + pt1.SetTimestamp(tsNanos) + pt1.Attributes().PutStr("bar", "foo") + + return []pmetric.Metrics{md0} + }, + }, + { + // TODO: A decision needs to be made on whether we want the behavior that can generate different tag-key sets as metrics come and go. + name: "gauge-comes-and-go-with-different-tagset", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("gauge_test", 100, nil, "foo", "bar"), + createDataPoint("gauge_test", 200, nil, "bar", "foo"), + }, + }, + { + pts: []*testDataPoint{ + createDataPoint("gauge_test", 20, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("gauge_test") + m0.Metadata().PutStr("prometheus.type", "gauge") + gauge0 := m0.SetEmptyGauge() + pt0 := gauge0.DataPoints().AppendEmpty() + pt0.SetDoubleValue(100.0) + pt0.SetStartTimestamp(0) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + + pt1 := gauge0.DataPoints().AppendEmpty() + pt1.SetDoubleValue(200.0) + pt1.SetStartTimestamp(0) + pt1.SetTimestamp(tsNanos) + pt1.Attributes().PutStr("bar", "foo") + + md1 := pmetric.NewMetrics() + mL1 := md1.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m1 := mL1.AppendEmpty() + m1.SetName("gauge_test") + m1.Metadata().PutStr("prometheus.type", "gauge") + gauge1 := m1.SetEmptyGauge() + pt2 := gauge1.DataPoints().AppendEmpty() + pt2.SetDoubleValue(20.0) + pt2.SetStartTimestamp(0) + pt2.SetTimestamp(tsPlusIntervalNanos) + pt2.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0, md1} + }, + }, + } + + for _, tt := range tests { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("%s/enableNativeHistograms=%v", tt.name, enableNativeHistograms), func(t *testing.T) { + tt.run(t, enableNativeHistograms) + }) + } + } +} + +func TestMetricBuilderUntyped(t *testing.T) { + tests := []buildTestData{ + { + name: "one-unknown", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("unknown_test", 100, nil, "foo", "bar"), + },
+ }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("unknown_test") + m0.Metadata().PutStr("prometheus.type", "unknown") + gauge0 := m0.SetEmptyGauge() + pt0 := gauge0.DataPoints().AppendEmpty() + pt0.SetDoubleValue(100.0) + pt0.SetStartTimestamp(0) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "no-type-hint", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("something_not_exists", 100, nil, "foo", "bar"), + createDataPoint("theother_not_exists", 200, nil, "foo", "bar"), + createDataPoint("theother_not_exists", 300, nil, "bar", "foo"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("something_not_exists") + m0.Metadata().PutStr("prometheus.type", "unknown") + gauge0 := m0.SetEmptyGauge() + pt0 := gauge0.DataPoints().AppendEmpty() + pt0.SetDoubleValue(100.0) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + + m1 := mL0.AppendEmpty() + m1.SetName("theother_not_exists") + m1.Metadata().PutStr("prometheus.type", "unknown") + gauge1 := m1.SetEmptyGauge() + pt1 := gauge1.DataPoints().AppendEmpty() + pt1.SetDoubleValue(200.0) + pt1.SetTimestamp(tsNanos) + pt1.Attributes().PutStr("foo", "bar") + + pt2 := gauge1.DataPoints().AppendEmpty() + pt2.SetDoubleValue(300.0) + pt2.SetTimestamp(tsNanos) + pt2.Attributes().PutStr("bar", "foo") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "untype-metric-poor-names", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("some_count", 100, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("some_count") + m0.Metadata().PutStr("prometheus.type", "unknown") + gauge0 := m0.SetEmptyGauge() + pt0 := gauge0.DataPoints().AppendEmpty() + pt0.SetDoubleValue(100.0) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + } + + for _, tt := range tests { + for _, enableNativeHistograms := range []bool{true, false} { + t.Run(fmt.Sprintf("%s/enableNativeHistograms=%v", tt.name, enableNativeHistograms), func(t *testing.T) { + tt.run(t, enableNativeHistograms) + }) + } + } +} + +func TestMetricBuilderHistogram(t *testing.T) { + tests := []buildTestData{ + { + name: "single item", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("hist_test_bucket", 1, nil, "foo", "bar", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_bucket", 10, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_sum", 99, nil, "foo", "bar"), + createDataPoint("hist_test_count", 10, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("hist_test") + m0.Metadata().PutStr("prometheus.type", "histogram") + hist0 := m0.SetEmptyHistogram() + hist0.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + pt0 := hist0.DataPoints().AppendEmpty() + 
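// The cumulative le counts 1 (le=10), 2 (le=20) and 10 (+inf) are converted into per-bucket deltas 1, 1 and 8. +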
pt0.SetCount(10) + pt0.SetSum(99) + pt0.ExplicitBounds().FromRaw([]float64{10, 20}) + pt0.BucketCounts().FromRaw([]uint64{1, 1, 8}) + pt0.SetTimestamp(tsNanos) + pt0.SetStartTimestamp(startTimestamp) + pt0.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "single item with exemplars", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint( + "hist_test_bucket", + 1, + []exemplar.Exemplar{ + { + Value: 1, + Ts: 1663113420863, + Labels: labels.New([]labels.Label{{Name: model.MetricNameLabel, Value: "counter_test"}, {Name: model.JobLabel, Value: "job"}, {Name: model.InstanceLabel, Value: "instance"}, {Name: "foo", Value: "bar"}}...), + }, + { + Value: 1, + Ts: 1663113420863, + Labels: labels.New([]labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: ""}, {Name: "span_id", Value: ""}, {Name: "le", Value: "20"}}...), + }, + { + Value: 1, + Ts: 1663113420863, + Labels: labels.New([]labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: "10a47365b8aa04e08291fab9deca84db6170"}, {Name: "traceid", Value: "e3688e1aa2961786"}, {Name: "span_id", Value: "719cee4a669fd7d109ff"}}...), + }, + { + Value: 1, + Ts: 1663113420863, + Labels: labels.New([]labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: "174137cab66dc880"}, {Name: "span_id", Value: "dfa4597a9d"}}...), + }, + { + Value: 1, + Ts: 1663113420863, + Labels: labels.New([]labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: "174137cab66dc88"}, {Name: "span_id", Value: "dfa4597a9"}}...), + }, + }, + "foo", "bar", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_bucket", 10, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_sum", 99, nil, "foo", "bar"), + createDataPoint("hist_test_count", 10, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("hist_test") + m0.Metadata().PutStr("prometheus.type", "histogram") + hist0 := m0.SetEmptyHistogram() + hist0.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + pt0 := hist0.DataPoints().AppendEmpty() + pt0.SetCount(10) + pt0.SetSum(99) + pt0.ExplicitBounds().FromRaw([]float64{10, 20}) + pt0.BucketCounts().FromRaw([]uint64{1, 1, 8}) + pt0.SetTimestamp(tsNanos) + pt0.SetStartTimestamp(startTimestamp) + pt0.Attributes().PutStr("foo", "bar") + + e0 := pt0.Exemplars().AppendEmpty() + e0.SetTimestamp(timestampFromMs(1663113420863)) + e0.SetDoubleValue(1) + e0.FilteredAttributes().PutStr(model.MetricNameLabel, "counter_test") + e0.FilteredAttributes().PutStr("foo", "bar") + e0.FilteredAttributes().PutStr(model.InstanceLabel, "instance") + e0.FilteredAttributes().PutStr(model.JobLabel, "job") + + e1 := pt0.Exemplars().AppendEmpty() + e1.SetTimestamp(timestampFromMs(1663113420863)) + e1.SetDoubleValue(1) + e1.FilteredAttributes().PutStr("foo", "bar") + e1.FilteredAttributes().PutStr("le", "20") + + e2 := pt0.Exemplars().AppendEmpty() + e2.SetTimestamp(timestampFromMs(1663113420863)) + e2.SetDoubleValue(1) + e2.FilteredAttributes().PutStr("foo", "bar") + e2.FilteredAttributes().PutStr("traceid", "e3688e1aa2961786") + e2.SetTraceID([16]byte{0x10, 0xa4, 0x73, 0x65, 0xb8, 0xaa, 0x04, 0xe0, 0x82, 0x91, 0xfa, 0xb9, 0xde, 0xca, 0x84, 0xdb}) + e2.SetSpanID([8]byte{0x71, 0x9c, 0xee, 0x4a, 0x66, 0x9f, 0xd7, 0xd1}) + + e3 := 
pt0.Exemplars().AppendEmpty() + e3.SetTimestamp(timestampFromMs(1663113420863)) + e3.SetDoubleValue(1) + e3.FilteredAttributes().PutStr("foo", "bar") + e3.SetTraceID([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x41, 0x37, 0xca, 0xb6, 0x6d, 0xc8, 0x80}) + e3.SetSpanID([8]byte{0x00, 0x00, 0x00, 0xdf, 0xa4, 0x59, 0x7a, 0x9d}) + + e4 := pt0.Exemplars().AppendEmpty() + e4.SetTimestamp(timestampFromMs(1663113420863)) + e4.SetDoubleValue(1) + e4.FilteredAttributes().PutStr("foo", "bar") + e4.FilteredAttributes().PutStr("span_id", "dfa4597a9") + e4.FilteredAttributes().PutStr("trace_id", "174137cab66dc88") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "multi-groups", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("hist_test_bucket", 1, nil, "foo", "bar", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_bucket", 10, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_sum", 99, nil, "foo", "bar"), + createDataPoint("hist_test_count", 10, nil, "foo", "bar"), + createDataPoint("hist_test_bucket", 1, nil, "key2", "v2", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "key2", "v2", "le", "20"), + createDataPoint("hist_test_bucket", 3, nil, "key2", "v2", "le", "+inf"), + createDataPoint("hist_test_sum", 50, nil, "key2", "v2"), + createDataPoint("hist_test_count", 3, nil, "key2", "v2"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("hist_test") + m0.Metadata().PutStr("prometheus.type", "histogram") + hist0 := m0.SetEmptyHistogram() + hist0.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + pt0 := hist0.DataPoints().AppendEmpty() + pt0.SetCount(10) + pt0.SetSum(99) + pt0.ExplicitBounds().FromRaw([]float64{10, 20}) + pt0.BucketCounts().FromRaw([]uint64{1, 1, 8}) + pt0.SetTimestamp(tsNanos) + pt0.SetStartTimestamp(startTimestamp) + pt0.Attributes().PutStr("foo", "bar") + + pt1 := hist0.DataPoints().AppendEmpty() + pt1.SetCount(3) + pt1.SetSum(50) + pt1.ExplicitBounds().FromRaw([]float64{10, 20}) + pt1.BucketCounts().FromRaw([]uint64{1, 1, 1}) + pt1.SetTimestamp(tsNanos) + pt1.SetStartTimestamp(startTimestamp) + pt1.Attributes().PutStr("key2", "v2") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "multi-groups-and-families", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("hist_test_bucket", 1, nil, "foo", "bar", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_bucket", 10, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_sum", 99, nil, "foo", "bar"), + createDataPoint("hist_test_count", 10, nil, "foo", "bar"), + createDataPoint("hist_test_bucket", 1, nil, "key2", "v2", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "key2", "v2", "le", "20"), + createDataPoint("hist_test_bucket", 3, nil, "key2", "v2", "le", "+inf"), + createDataPoint("hist_test_sum", 50, nil, "key2", "v2"), + createDataPoint("hist_test_count", 3, nil, "key2", "v2"), + createDataPoint("hist_test2_bucket", 1, nil, "foo", "bar", "le", "10"), + createDataPoint("hist_test2_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test2_bucket", 3, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test2_sum", 50, nil, "foo", "bar"), + createDataPoint("hist_test2_count", 3, nil, "foo", 
"bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("hist_test") + m0.Metadata().PutStr("prometheus.type", "histogram") + hist0 := m0.SetEmptyHistogram() + hist0.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + pt0 := hist0.DataPoints().AppendEmpty() + pt0.SetCount(10) + pt0.SetSum(99) + pt0.ExplicitBounds().FromRaw([]float64{10, 20}) + pt0.BucketCounts().FromRaw([]uint64{1, 1, 8}) + pt0.SetTimestamp(tsNanos) + pt0.SetStartTimestamp(startTimestamp) + pt0.Attributes().PutStr("foo", "bar") + + pt1 := hist0.DataPoints().AppendEmpty() + pt1.SetCount(3) + pt1.SetSum(50) + pt1.ExplicitBounds().FromRaw([]float64{10, 20}) + pt1.BucketCounts().FromRaw([]uint64{1, 1, 1}) + pt1.SetTimestamp(tsNanos) + pt1.SetStartTimestamp(startTimestamp) + pt1.Attributes().PutStr("key2", "v2") + + m1 := mL0.AppendEmpty() + m1.SetName("hist_test2") + m1.Metadata().PutStr("prometheus.type", "histogram") + hist1 := m1.SetEmptyHistogram() + hist1.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + pt2 := hist1.DataPoints().AppendEmpty() + pt2.SetCount(3) + pt2.SetSum(50) + pt2.ExplicitBounds().FromRaw([]float64{10, 20}) + pt2.BucketCounts().FromRaw([]uint64{1, 1, 1}) + pt2.SetTimestamp(tsNanos) + pt2.SetStartTimestamp(startTimestamp) + pt2.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "unordered-buckets", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("hist_test_bucket", 10, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_bucket", 1, nil, "foo", "bar", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_sum", 99, nil, "foo", "bar"), + createDataPoint("hist_test_count", 10, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("hist_test") + m0.Metadata().PutStr("prometheus.type", "histogram") + hist0 := m0.SetEmptyHistogram() + hist0.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + pt0 := hist0.DataPoints().AppendEmpty() + pt0.SetCount(10) + pt0.SetSum(99) + pt0.ExplicitBounds().FromRaw([]float64{10, 20}) + pt0.BucketCounts().FromRaw([]uint64{1, 1, 8}) + pt0.SetTimestamp(tsNanos) + pt0.SetStartTimestamp(startTimestamp) + pt0.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + { + // this won't likely happen in real env, as prometheus wont generate histogram with less than 3 buckets + name: "only-one-bucket", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("hist_test_bucket", 3, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_count", 3, nil, "foo", "bar"), + createDataPoint("hist_test_sum", 100, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("hist_test") + m0.Metadata().PutStr("prometheus.type", "histogram") + hist0 := m0.SetEmptyHistogram() + hist0.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + pt0 := hist0.DataPoints().AppendEmpty() + pt0.SetCount(3) + pt0.SetSum(100) + pt0.BucketCounts().FromRaw([]uint64{3}) + 
pt0.SetTimestamp(tsNanos) + pt0.SetStartTimestamp(startTimestamp) + pt0.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + { + // This is unlikely to happen in a real environment, as Prometheus won't generate a histogram with fewer than 3 buckets. + name: "only-one-bucket-noninf", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("hist_test_bucket", 3, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_count", 3, nil, "foo", "bar"), + createDataPoint("hist_test_sum", 100, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("hist_test") + m0.Metadata().PutStr("prometheus.type", "histogram") + hist0 := m0.SetEmptyHistogram() + hist0.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + pt0 := hist0.DataPoints().AppendEmpty() + pt0.SetCount(3) + pt0.SetSum(100) + pt0.BucketCounts().FromRaw([]uint64{3, 0}) + pt0.ExplicitBounds().FromRaw([]float64{20}) + pt0.SetTimestamp(tsNanos) + pt0.SetStartTimestamp(startTimestamp) + pt0.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "no-sum", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("hist_test_bucket", 1, nil, "foo", "bar", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_bucket", 3, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_count", 3, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("hist_test") + m0.Metadata().PutStr("prometheus.type", "histogram") + hist0 := m0.SetEmptyHistogram() + hist0.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + pt0 := hist0.DataPoints().AppendEmpty() + pt0.SetCount(3) + pt0.ExplicitBounds().FromRaw([]float64{10, 20}) + pt0.BucketCounts().FromRaw([]uint64{1, 1, 1}) + pt0.SetTimestamp(tsNanos) + pt0.SetStartTimestamp(startTimestamp) + pt0.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "corrupted-no-buckets", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("hist_test_sum", 99, nil, "foo", "bar"), + createDataPoint("hist_test_count", 10, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("hist_test") + m0.Metadata().PutStr("prometheus.type", "histogram") + hist0 := m0.SetEmptyHistogram() + hist0.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + pt0 := hist0.DataPoints().AppendEmpty() + pt0.SetCount(10) + pt0.SetSum(99) + pt0.BucketCounts().FromRaw([]uint64{10}) + pt0.SetTimestamp(tsNanos) + pt0.SetStartTimestamp(startTimestamp) + pt0.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "corrupted-no-count", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("hist_test_bucket", 1, nil, "foo", "bar", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_bucket", 3, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_sum", 99, nil, "foo", "bar"), + },
}, + }, + wants: func() []pmetric.Metrics { + return []pmetric.Metrics{pmetric.NewMetrics()} + }, + }, + } + + for _, tt := range tests { + for _, enableNativeHistograms := range []bool{true, false} { + // None of the histograms above have native histogram versions, so enabling native histograms has no effect. + t.Run(fmt.Sprintf("%s/enableNativeHistograms=%v", tt.name, enableNativeHistograms), func(t *testing.T) { + tt.run(t, enableNativeHistograms) + }) + } + } +} + +func TestMetricBuilderSummary(t *testing.T) { + tests := []buildTestData{ + { + name: "no-sum-and-count", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("summary_test", 5, nil, "foo", "bar", "quantile", "1"), + }, + }, + }, + wants: func() []pmetric.Metrics { + return []pmetric.Metrics{pmetric.NewMetrics()} + }, + }, + { + name: "no-count", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("summary_test", 1, nil, "foo", "bar", "quantile", "0.5"), + createDataPoint("summary_test", 2, nil, "foo", "bar", "quantile", "0.75"), + createDataPoint("summary_test", 5, nil, "foo", "bar", "quantile", "1"), + createDataPoint("summary_test_sum", 500, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + return []pmetric.Metrics{pmetric.NewMetrics()} + }, + }, + { + name: "no-sum", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("summary_test", 1, nil, "foo", "bar", "quantile", "0.5"), + createDataPoint("summary_test", 2, nil, "foo", "bar", "quantile", "0.75"), + createDataPoint("summary_test", 5, nil, "foo", "bar", "quantile", "1"), + createDataPoint("summary_test_count", 500, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("summary_test") + m0.Metadata().PutStr("prometheus.type", "summary") + sum0 := m0.SetEmptySummary() + pt0 := sum0.DataPoints().AppendEmpty() + pt0.SetTimestamp(tsNanos) + pt0.SetStartTimestamp(startTimestamp) + pt0.SetCount(500) + pt0.SetSum(0.0) + pt0.Attributes().PutStr("foo", "bar") + qvL := pt0.QuantileValues() + q50 := qvL.AppendEmpty() + q50.SetQuantile(.50) + q50.SetValue(1.0) + q75 := qvL.AppendEmpty() + q75.SetQuantile(.75) + q75.SetValue(2.0) + q100 := qvL.AppendEmpty() + q100.SetQuantile(1) + q100.SetValue(5.0) + return []pmetric.Metrics{md0} + }, + }, + { + name: "empty-quantiles", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("summary_test_sum", 100, nil, "foo", "bar"), + createDataPoint("summary_test_count", 500, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("summary_test") + m0.Metadata().PutStr("prometheus.type", "summary") + sum0 := m0.SetEmptySummary() + pt0 := sum0.DataPoints().AppendEmpty() + pt0.SetStartTimestamp(startTimestamp) + pt0.SetTimestamp(tsNanos) + pt0.SetCount(500) + pt0.SetSum(100.0) + pt0.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "regular-summary", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint("summary_test", 1, nil, "foo", "bar", "quantile", "0.5"), + createDataPoint("summary_test", 2, nil, "foo", "bar", "quantile", "0.75"), + createDataPoint("summary_test", 5, nil, "foo", "bar", "quantile", "1"), +
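// A well-formed summary exposes its quantile series together with the _sum and _count series. +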
createDataPoint("summary_test_sum", 100, nil, "foo", "bar"), + createDataPoint("summary_test_count", 500, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("summary_test") + m0.Metadata().PutStr("prometheus.type", "summary") + sum0 := m0.SetEmptySummary() + pt0 := sum0.DataPoints().AppendEmpty() + pt0.SetStartTimestamp(startTimestamp) + pt0.SetTimestamp(tsNanos) + pt0.SetCount(500) + pt0.SetSum(100.0) + pt0.Attributes().PutStr("foo", "bar") + qvL := pt0.QuantileValues() + q50 := qvL.AppendEmpty() + q50.SetQuantile(.50) + q50.SetValue(1.0) + q75 := qvL.AppendEmpty() + q75.SetQuantile(.75) + q75.SetValue(2.0) + q100 := qvL.AppendEmpty() + q100.SetQuantile(1) + q100.SetValue(5.0) + + return []pmetric.Metrics{md0} + }, + }, + } + + for _, tt := range tests { + for _, enableNativeHistograms := range []bool{false, true} { + t.Run(fmt.Sprintf("%s/enableNativeHistograms=%v", tt.name, enableNativeHistograms), func(t *testing.T) { + tt.run(t, enableNativeHistograms) + }) + } + } +} + +func TestMetricBuilderNativeHistogram(t *testing.T) { + for _, enableNativeHistograms := range []bool{false, true} { + emptyH := &histogram.Histogram{ + Schema: 1, + Count: 0, + Sum: 0, + ZeroThreshold: 0.001, + ZeroCount: 0, + } + h0 := tsdbutil.GenerateTestHistogram(0) + + tests := []buildTestData{ + { + name: "empty integer histogram", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createHistogramDataPoint("hist_test", emptyH, nil, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + if !enableNativeHistograms { + return []pmetric.Metrics{md0} + } + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("hist_test") + m0.Metadata().PutStr("prometheus.type", "histogram") + m0.SetEmptyExponentialHistogram() + m0.ExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + pt0 := m0.ExponentialHistogram().DataPoints().AppendEmpty() + pt0.Attributes().PutStr("foo", "bar") + pt0.SetStartTimestamp(startTimestamp) + pt0.SetTimestamp(tsNanos) + pt0.SetCount(0) + pt0.SetSum(0) + pt0.SetZeroThreshold(0.001) + pt0.SetScale(1) + + return []pmetric.Metrics{md0} + }, + }, + { + name: "integer histogram", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createHistogramDataPoint("hist_test", h0, nil, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + if !enableNativeHistograms { + return []pmetric.Metrics{md0} + } + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("hist_test") + m0.Metadata().PutStr("prometheus.type", "histogram") + m0.SetEmptyExponentialHistogram() + m0.ExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + pt0 := m0.ExponentialHistogram().DataPoints().AppendEmpty() + pt0.Attributes().PutStr("foo", "bar") + pt0.SetStartTimestamp(startTimestamp) + pt0.SetTimestamp(tsNanos) + pt0.SetCount(12) + pt0.SetSum(18.4) + pt0.SetScale(1) + pt0.SetZeroThreshold(0.001) + pt0.SetZeroCount(2) + pt0.Positive().SetOffset(-1) + pt0.Positive().BucketCounts().Append(1) + pt0.Positive().BucketCounts().Append(2) + pt0.Positive().BucketCounts().Append(0) + pt0.Positive().BucketCounts().Append(1) + pt0.Positive().BucketCounts().Append(1) 
+ pt0.Negative().SetOffset(-1) + pt0.Negative().BucketCounts().Append(1) + pt0.Negative().BucketCounts().Append(2) + pt0.Negative().BucketCounts().Append(0) + pt0.Negative().BucketCounts().Append(1) + pt0.Negative().BucketCounts().Append(1) + + return []pmetric.Metrics{md0} + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.run(t, enableNativeHistograms) + }) + } + } +} + +type buildTestData struct { + name string + inputs []*testScrapedPage + wants func() []pmetric.Metrics +} + +func (tt buildTestData) run(t *testing.T, enableNativeHistograms bool) { + wants := tt.wants() + assert.EqualValues(t, len(wants), len(tt.inputs)) + st := ts + for i, page := range tt.inputs { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, labels.EmptyLabels(), receivertest.NewNopSettings(), nopObsRecv(t), false, enableNativeHistograms) + for _, pt := range page.pts { + // set ts for testing + pt.t = st + var err error + switch { + case pt.fh != nil: + _, err = tr.AppendHistogram(0, pt.lb, pt.t, nil, pt.fh) + case pt.h != nil: + _, err = tr.AppendHistogram(0, pt.lb, pt.t, pt.h, nil) + default: + _, err = tr.Append(0, pt.lb, pt.t, pt.v) + } + require.NoError(t, err) + + for _, e := range pt.exemplars { + _, err := tr.AppendExemplar(0, pt.lb, e) + require.NoError(t, err) + } + } + require.NoError(t, tr.Commit()) + mds := sink.AllMetrics() + if wants[i].ResourceMetrics().Len() == 0 { + // Receiver does not emit empty metrics, so will not have anything in the sink. + require.Empty(t, mds) + st += interval + continue + } + require.Len(t, mds, 1) + assertEquivalentMetrics(t, wants[i], mds[0]) + st += interval + } +} + +type errorAdjuster struct { + err error +} + +func (ea *errorAdjuster) AdjustMetrics(pmetric.Metrics) error { + return ea.err +} + +type startTimeAdjuster struct { + startTime pcommon.Timestamp +} + +func (s *startTimeAdjuster) AdjustMetrics(metrics pmetric.Metrics) error { + for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + rm := metrics.ResourceMetrics().At(i) + for j := 0; j < rm.ScopeMetrics().Len(); j++ { + ilm := rm.ScopeMetrics().At(j) + for k := 0; k < ilm.Metrics().Len(); k++ { + metric := ilm.Metrics().At(k) + switch metric.Type() { + case pmetric.MetricTypeSum: + dps := metric.Sum().DataPoints() + for l := 0; l < dps.Len(); l++ { + dps.At(l).SetStartTimestamp(s.startTime) + } + case pmetric.MetricTypeSummary: + dps := metric.Summary().DataPoints() + for l := 0; l < dps.Len(); l++ { + dps.At(l).SetStartTimestamp(s.startTime) + } + case pmetric.MetricTypeHistogram: + dps := metric.Histogram().DataPoints() + for l := 0; l < dps.Len(); l++ { + dps.At(l).SetStartTimestamp(s.startTime) + } + case pmetric.MetricTypeExponentialHistogram: + dps := metric.ExponentialHistogram().DataPoints() + for l := 0; l < dps.Len(); l++ { + dps.At(l).SetStartTimestamp(s.startTime) + } + case pmetric.MetricTypeEmpty, pmetric.MetricTypeGauge: + } + } + } + } + return nil +} + +type testDataPoint struct { + lb labels.Labels + t int64 + v float64 + h *histogram.Histogram + fh *histogram.FloatHistogram + exemplars []exemplar.Exemplar +} + +type testScrapedPage struct { + pts []*testDataPoint +} + +func createDataPoint(mname string, value float64, es []exemplar.Exemplar, tagPairs ...string) *testDataPoint { + var lbls []string + lbls = append(lbls, tagPairs...) 
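+ // The standard identity labels are appended after the caller's tag pairs, so every synthetic series carries a metric name, job and instance.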
+ lbls = append(lbls, model.MetricNameLabel, mname) + lbls = append(lbls, model.JobLabel, "job") + lbls = append(lbls, model.InstanceLabel, "instance") + + return &testDataPoint{ + lb: labels.FromStrings(lbls...), + t: ts, + v: value, + exemplars: es, + } +} + +func createHistogramDataPoint(mname string, h *histogram.Histogram, fh *histogram.FloatHistogram, es []exemplar.Exemplar, tagPairs ...string) *testDataPoint { + dataPoint := createDataPoint(mname, 0, es, tagPairs...) + dataPoint.h = h + dataPoint.fh = fh + return dataPoint +} + +func assertEquivalentMetrics(t *testing.T, want, got pmetric.Metrics) { + require.Equal(t, want.ResourceMetrics().Len(), got.ResourceMetrics().Len()) + if want.ResourceMetrics().Len() == 0 { + return + } + for i := 0; i < want.ResourceMetrics().Len(); i++ { + wantSm := want.ResourceMetrics().At(i).ScopeMetrics() + gotSm := got.ResourceMetrics().At(i).ScopeMetrics() + require.Equal(t, wantSm.Len(), gotSm.Len()) + if wantSm.Len() == 0 { + return + } + + for j := 0; j < wantSm.Len(); j++ { + wantMs := wantSm.At(j).Metrics() + gotMs := gotSm.At(j).Metrics() + require.Equal(t, wantMs.Len(), gotMs.Len()) + + wmap := map[string]pmetric.Metric{} + gmap := map[string]pmetric.Metric{} + + for k := 0; k < wantMs.Len(); k++ { + wi := wantMs.At(k) + wmap[wi.Name()] = wi + gi := gotMs.At(k) + gmap[gi.Name()] = gi + } + assert.EqualValues(t, wmap, gmap) + } + } +} diff --git a/pkg/promotel/internal/prometheusreceiver/internal/util.go b/pkg/promotel/internal/prometheusreceiver/internal/util.go new file mode 100644 index 0000000000..e1b65137f8 --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/internal/util.go @@ -0,0 +1,128 @@ +package internal + +import ( + "errors" + "sort" + "strconv" + "strings" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +const ( + metricsSuffixCount = "_count" + metricsSuffixBucket = "_bucket" + metricsSuffixSum = "_sum" + metricSuffixTotal = "_total" + metricSuffixInfo = "_info" + metricSuffixCreated = "_created" + startTimeMetricName = "process_start_time_seconds" + scrapeUpMetricName = "up" + + transport = "http" + dataformat = "prometheus" +) + +var ( + trimmableSuffixes = []string{metricsSuffixBucket, metricsSuffixCount, metricsSuffixSum, metricSuffixTotal, metricSuffixInfo, metricSuffixCreated} + errNoDataToBuild = errors.New("there's no data to build") + errNoBoundaryLabel = errors.New("given metricType has no 'le' or 'quantile' label") + errEmptyQuantileLabel = errors.New("'quantile' label on summary metric is missing or empty") + errEmptyLeLabel = errors.New("'le' label on histogram metric is missing or empty") + errMetricNameNotFound = errors.New("metricName not found from labels") + errTransactionAborted = errors.New("transaction aborted") + errNoJobInstance = errors.New("job or instance cannot be found from labels") + + notUsefulLabelsHistogram = sortString([]string{model.MetricNameLabel, model.InstanceLabel, model.SchemeLabel, model.MetricsPathLabel, model.JobLabel, model.BucketLabel}) + notUsefulLabelsSummary = sortString([]string{model.MetricNameLabel, model.InstanceLabel, model.SchemeLabel, model.MetricsPathLabel, model.JobLabel, model.QuantileLabel}) + notUsefulLabelsOther = sortString([]string{model.MetricNameLabel, model.InstanceLabel, model.SchemeLabel, model.MetricsPathLabel, model.JobLabel}) +) + +func sortString(strs []string) []string { + sort.Strings(strs) + return strs 
+} + +func getSortedNotUsefulLabels(mType pmetric.MetricType) []string { + switch mType { + case pmetric.MetricTypeHistogram: + return notUsefulLabelsHistogram + case pmetric.MetricTypeSummary: + return notUsefulLabelsSummary + case pmetric.MetricTypeEmpty, pmetric.MetricTypeGauge, pmetric.MetricTypeSum, pmetric.MetricTypeExponentialHistogram: + fallthrough + default: + return notUsefulLabelsOther + } +} + +func timestampFromFloat64(ts float64) pcommon.Timestamp { + secs := int64(ts) + nanos := int64((ts - float64(secs)) * 1e9) + return pcommon.Timestamp(secs*1e9 + nanos) // nolint +} + +func timestampFromMs(timeAtMs int64) pcommon.Timestamp { + return pcommon.Timestamp(timeAtMs * 1e6) // nolint +} + +func getBoundary(metricType pmetric.MetricType, labels labels.Labels) (float64, error) { + var val string + switch metricType { + case pmetric.MetricTypeHistogram: + val = labels.Get(model.BucketLabel) + if val == "" { + return 0, errEmptyLeLabel + } + case pmetric.MetricTypeSummary: + val = labels.Get(model.QuantileLabel) + if val == "" { + return 0, errEmptyQuantileLabel + } + case pmetric.MetricTypeEmpty, pmetric.MetricTypeGauge, pmetric.MetricTypeSum, pmetric.MetricTypeExponentialHistogram: + fallthrough + default: + return 0, errNoBoundaryLabel + } + + return strconv.ParseFloat(val, 64) +} + +// convToMetricType returns the data type and if it is monotonic +func convToMetricType(metricType model.MetricType) (pmetric.MetricType, bool) { + switch metricType { + case model.MetricTypeCounter: + // always use float64, as it's the internal data type used in prometheus + return pmetric.MetricTypeSum, true + // model.MetricTypeUnknown is converted to gauge by default to prevent Prometheus untyped metrics from being dropped + case model.MetricTypeGauge, model.MetricTypeUnknown: + return pmetric.MetricTypeGauge, false + case model.MetricTypeHistogram: + return pmetric.MetricTypeHistogram, true + // dropping support for gaugehistogram for now until we have an official spec of its implementation + // a draft can be found in: https://docs.google.com/document/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit#heading=h.1cvzqd4ksd23 + // case model.MetricTypeGaugeHistogram: + // return + case model.MetricTypeSummary: + return pmetric.MetricTypeSummary, true + case model.MetricTypeInfo, model.MetricTypeStateset: + return pmetric.MetricTypeSum, false + case model.MetricTypeGaugeHistogram: + fallthrough + default: + // including: model.MetricTypeGaugeHistogram + return pmetric.MetricTypeEmpty, false + } +} + +func normalizeMetricName(name string) string { + for _, s := range trimmableSuffixes { + if strings.HasSuffix(name, s) && name != s { + return strings.TrimSuffix(name, s) + } + } + return name +} diff --git a/pkg/promotel/internal/prometheusreceiver/internal/util_test.go b/pkg/promotel/internal/prometheusreceiver/internal/util_test.go new file mode 100644 index 0000000000..0a9b798f1c --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/internal/util_test.go @@ -0,0 +1,182 @@ +package internal + +import ( + "testing" + "time" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheus/scrape" +) + +var testMetadata = map[string]scrape.MetricMetadata{ + "counter_test": {Metric: "counter_test", Type: 
model.MetricTypeCounter, Help: "", Unit: ""}, + "counter_test2": {Metric: "counter_test2", Type: model.MetricTypeCounter, Help: "", Unit: ""}, + "gauge_test": {Metric: "gauge_test", Type: model.MetricTypeGauge, Help: "", Unit: ""}, + "gauge_test2": {Metric: "gauge_test2", Type: model.MetricTypeGauge, Help: "", Unit: ""}, + "hist_test": {Metric: "hist_test", Type: model.MetricTypeHistogram, Help: "", Unit: ""}, + "hist_test2": {Metric: "hist_test2", Type: model.MetricTypeHistogram, Help: "", Unit: ""}, + "ghist_test": {Metric: "ghist_test", Type: model.MetricTypeGaugeHistogram, Help: "", Unit: ""}, + "summary_test": {Metric: "summary_test", Type: model.MetricTypeSummary, Help: "", Unit: ""}, + "summary_test2": {Metric: "summary_test2", Type: model.MetricTypeSummary, Help: "", Unit: ""}, + "unknown_test": {Metric: "unknown_test", Type: model.MetricTypeUnknown, Help: "", Unit: ""}, + "poor_name": {Metric: "poor_name", Type: model.MetricTypeGauge, Help: "", Unit: ""}, + "poor_name_count": {Metric: "poor_name_count", Type: model.MetricTypeCounter, Help: "", Unit: ""}, + "scrape_foo": {Metric: "scrape_foo", Type: model.MetricTypeCounter, Help: "", Unit: ""}, + "example_process_start_time_seconds": { + Metric: "example_process_start_time_seconds", + Type: model.MetricTypeGauge, Help: "", Unit: "", + }, + "process_start_time_seconds": { + Metric: "process_start_time_seconds", + Type: model.MetricTypeGauge, Help: "", Unit: "", + }, + "subprocess_start_time_seconds": { + Metric: "subprocess_start_time_seconds", + Type: model.MetricTypeGauge, Help: "", Unit: "", + }, +} + +func TestTimestampFromMs(t *testing.T) { + assert.Equal(t, pcommon.Timestamp(0), timestampFromMs(0)) + assert.Equal(t, pcommon.NewTimestampFromTime(time.UnixMilli(1662679535432)), timestampFromMs(1662679535432)) +} + +func TestTimestampFromFloat64(t *testing.T) { + assert.Equal(t, pcommon.Timestamp(0), timestampFromFloat64(0)) + // Because of float64 conversion, we check only that we are within 100ns error. 
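+ // For example, timestampFromFloat64(1662679535.040) should land at roughly 1662679535040000000 ns.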
+ assert.InEpsilon(t, uint64(1662679535040000000), uint64(timestampFromFloat64(1662679535.040)), 100) +} + +func TestConvToMetricType(t *testing.T) { + tests := []struct { + name string + mtype model.MetricType + want pmetric.MetricType + wantMonotonic bool + }{ + { + name: "model.counter", + mtype: model.MetricTypeCounter, + want: pmetric.MetricTypeSum, + wantMonotonic: true, + }, + { + name: "model.gauge", + mtype: model.MetricTypeGauge, + want: pmetric.MetricTypeGauge, + wantMonotonic: false, + }, + { + name: "model.unknown", + mtype: model.MetricTypeUnknown, + want: pmetric.MetricTypeGauge, + wantMonotonic: false, + }, + { + name: "model.histogram", + mtype: model.MetricTypeHistogram, + want: pmetric.MetricTypeHistogram, + wantMonotonic: true, + }, + { + name: "model.summary", + mtype: model.MetricTypeSummary, + want: pmetric.MetricTypeSummary, + wantMonotonic: true, + }, + { + name: "model.metric_type_info", + mtype: model.MetricTypeInfo, + want: pmetric.MetricTypeSum, + wantMonotonic: false, + }, + { + name: "model.metric_state_set", + mtype: model.MetricTypeStateset, + want: pmetric.MetricTypeSum, + wantMonotonic: false, + }, + { + name: "model.metric_gauge_histogram", + mtype: model.MetricTypeGaugeHistogram, + want: pmetric.MetricTypeEmpty, + wantMonotonic: false, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + got, monotonic := convToMetricType(tt.mtype) + require.Equal(t, got.String(), tt.want.String()) + require.Equal(t, tt.wantMonotonic, monotonic) + }) + } +} + +func TestGetBoundary(t *testing.T) { + tests := []struct { + name string + mtype pmetric.MetricType + labels labels.Labels + wantValue float64 + wantErr error + }{ + { + name: "cumulative histogram with bucket label", + mtype: pmetric.MetricTypeHistogram, + labels: labels.FromStrings(model.BucketLabel, "0.256"), + wantValue: 0.256, + }, + { + name: "gauge histogram with bucket label", + mtype: pmetric.MetricTypeHistogram, + labels: labels.FromStrings(model.BucketLabel, "11.71"), + wantValue: 11.71, + }, + { + name: "summary with bucket label", + mtype: pmetric.MetricTypeSummary, + labels: labels.FromStrings(model.BucketLabel, "11.71"), + wantErr: errEmptyQuantileLabel, + }, + { + name: "summary with quantile label", + mtype: pmetric.MetricTypeSummary, + labels: labels.FromStrings(model.QuantileLabel, "92.88"), + wantValue: 92.88, + }, + { + name: "gauge histogram mismatched with bucket label", + mtype: pmetric.MetricTypeSummary, + labels: labels.FromStrings(model.BucketLabel, "11.71"), + wantErr: errEmptyQuantileLabel, + }, + { + name: "other data types without matches", + mtype: pmetric.MetricTypeGauge, + labels: labels.FromStrings(model.BucketLabel, "11.71"), + wantErr: errNoBoundaryLabel, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + value, err := getBoundary(tt.mtype, tt.labels) + if tt.wantErr != nil { + assert.ErrorIs(t, err, tt.wantErr) + return + } + + require.NoError(t, err) + assert.Equal(t, tt.wantValue, value) // nolint + }) + } +} diff --git a/pkg/promotel/internal/prometheusreceiver/metrics_receiver.go b/pkg/promotel/internal/prometheusreceiver/metrics_receiver.go new file mode 100644 index 0000000000..50309da7d7 --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/metrics_receiver.go @@ -0,0 +1,164 @@ +package prometheusreceiver + +import ( + "context" + "regexp" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + + "go.opentelemetry.io/collector/component" +
"go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/receiver" + "go.uber.org/zap" + + "github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheus/scrape" + "github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheusreceiver/internal" +) + +const ( + defaultGCInterval = 2 * time.Minute + gcIntervalDelta = 1 * time.Minute +) + +// pReceiver is the type that provides Prometheus scraper/receiver functionality. +type pReceiver struct { + cfg *Config + consumer consumer.Metrics + cancelFunc context.CancelFunc + configLoaded chan struct{} + loadConfigOnce sync.Once + + settings receiver.Settings + registerer prometheus.Registerer + gatherer prometheus.Gatherer + unregisterMetrics func() +} + +func NewPrometheusReceiver(set receiver.Settings, cfg *Config, next consumer.Metrics) *pReceiver { + return newPrometheusReceiver(set, cfg, next) +} + +// New creates a new prometheus.Receiver reference. +func newPrometheusReceiver(set receiver.Settings, cfg *Config, next consumer.Metrics) *pReceiver { + var ( + registerer prometheus.Registerer + gatherer prometheus.Gatherer + ) + if cfg.Registry != nil { + registerer = cfg.Registry + gatherer = cfg.Registry + } else { + registerer = prometheus.DefaultRegisterer + gatherer = prometheus.DefaultGatherer + } + + pr := &pReceiver{ + cfg: cfg, + consumer: next, + settings: set, + configLoaded: make(chan struct{}), + registerer: prometheus.WrapRegistererWith( + prometheus.Labels{"receiver": set.ID.String()}, + registerer), + gatherer: gatherer, + } + return pr +} + +// Start is the method that starts Prometheus scraping. It +// is controlled by having previously defined a Configuration using perhaps New. +func (r *pReceiver) Start(ctx context.Context, host component.Host) error { + discoveryCtx, cancel := context.WithCancel(context.Background()) + r.cancelFunc = cancel + + logger := internal.NewZapToGokitLogAdapter(r.settings.Logger) + + err := r.initPrometheusComponents(discoveryCtx, logger, host) + if err != nil { + r.settings.Logger.Error("Failed to initPrometheusComponents Prometheus components", zap.Error(err)) + return err + } + + r.loadConfigOnce.Do(func() { + close(r.configLoaded) + }) + + return nil +} + +func (r *pReceiver) initPrometheusComponents(ctx context.Context, logger log.Logger, host component.Host) error { + var startTimeMetricRegex *regexp.Regexp + var err error + if r.cfg.StartTimeMetricRegex != "" { + startTimeMetricRegex, err = regexp.Compile(r.cfg.StartTimeMetricRegex) + if err != nil { + return err + } + } + + store, err := internal.NewAppendable( + r.consumer, + r.settings, + gcInterval(r.cfg.PrometheusConfig), + r.cfg.UseStartTimeMetric, + startTimeMetricRegex, + false, + false, + r.cfg.PrometheusConfig.GlobalConfig.ExternalLabels, + r.cfg.TrimMetricSuffixes, + ) + if err != nil { + return err + } + + loop, err := scrape.NewGathererLoop(ctx, nil, store, r.registerer, r.gatherer, 10*time.Millisecond) + if err != nil { + return err + } + + r.unregisterMetrics = func() { + loop.UnregisterMetrics() + } + + go func() { + <-r.configLoaded + r.settings.Logger.Info("Starting gatherer loop") + // Run loop directly instead of scrape manager + loop.Run(nil) + // if err := r.scrapeManager.Run(r.discoveryManager.SyncCh()); err != nil { + // r.settings.Logger.Error("Scrape manager failed", zap.Error(err)) + // componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err)) + // } + }() + return nil +} + +// gcInterval returns the longest scrape interval used by a scrape config, 
+// plus a delta to prevent race conditions.
+// This ensures jobs are not garbage collected between scrapes.
+func gcInterval(cfg *PromConfig) time.Duration {
+	gcInterval := defaultGCInterval
+	if time.Duration(cfg.GlobalConfig.ScrapeInterval)+gcIntervalDelta > gcInterval {
+		gcInterval = time.Duration(cfg.GlobalConfig.ScrapeInterval) + gcIntervalDelta
+	}
+	for _, scrapeConfig := range cfg.ScrapeConfigs {
+		if time.Duration(scrapeConfig.ScrapeInterval)+gcIntervalDelta > gcInterval {
+			gcInterval = time.Duration(scrapeConfig.ScrapeInterval) + gcIntervalDelta
+		}
+	}
+	return gcInterval
+}
+
+// Shutdown stops and cancels the underlying Prometheus scrapers.
+func (r *pReceiver) Shutdown(context.Context) error {
+	if r.cancelFunc != nil {
+		r.cancelFunc()
+	}
+	if r.unregisterMetrics != nil {
+		r.unregisterMetrics()
+	}
+	return nil
+}
diff --git a/pkg/promotel/internal/prometheusreceiver/metrics_receiver_test.go b/pkg/promotel/internal/prometheusreceiver/metrics_receiver_test.go
new file mode 100644
index 0000000000..1d7cc6035a
--- /dev/null
+++ b/pkg/promotel/internal/prometheusreceiver/metrics_receiver_test.go
@@ -0,0 +1,76 @@
+package prometheusreceiver_test
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	promcfg "github.com/prometheus/prometheus/config"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"go.opentelemetry.io/collector/component/componenttest"
+	"go.opentelemetry.io/collector/consumer/consumertest"
+	"go.opentelemetry.io/collector/pdata/pmetric"
+	"go.opentelemetry.io/collector/receiver/receivertest"
+
+	promreceiver "github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheusreceiver"
+)
+
+func TestReceiverEndToEnd(t *testing.T) {
+	ctx := context.Background()
+	config := &promreceiver.Config{
+		PrometheusConfig:     (*promreceiver.PromConfig)(&promcfg.Config{}),
+		StartTimeMetricRegex: "",
+	}
+
+	cms := new(consumertest.MetricsSink)
+	receiver := promreceiver.NewPrometheusReceiver(receivertest.NewNopSettings(), config, cms)
+
+	require.NoError(t, receiver.Start(ctx, componenttest.NewNopHost()))
+	// Verify state after shutdown is called.
+	t.Cleanup(func() {
+		require.NoError(t, receiver.Shutdown(context.Background()))
+	})
+	// Wait for some scrape results to be collected
+	assert.Eventually(t, func() bool {
+		// This is the receiver's pov as to what should have been collected from the server
+		metrics := cms.AllMetrics()
+		return len(metrics) > 0
+	}, 30*time.Second, 500*time.Millisecond)
+
+	// This begins the processing of the scrapes collected by the receiver
+	metrics := cms.AllMetrics()
+	// split and store results by target name
+	pResults := splitMetricsByTarget(metrics)
+	for _, scrapes := range pResults {
+		assert.NotEmpty(t, scrapes)
+		for _, scrape := range scrapes {
+			// Verify that each scrape contains expected metrics
+			ilms := scrape.ScopeMetrics()
+			for j := 0; j < ilms.Len(); j++ {
+				metrics := ilms.At(j).Metrics()
+				assert.NotEmpty(t, metrics, "expected non-empty metrics")
+				for k := 0; k < metrics.Len(); k++ {
+					metric := metrics.At(k)
+					assert.NotEmpty(t, metric.Name(), "expected metric to have a name")
+				}
+			}
+		}
+	}
+}
+
+func splitMetricsByTarget(metrics []pmetric.Metrics) map[string][]pmetric.ResourceMetrics {
+	pResults :=
make(map[string][]pmetric.ResourceMetrics) + for _, md := range metrics { + rms := md.ResourceMetrics() + for i := 0; i < rms.Len(); i++ { + name, _ := rms.At(i).Resource().Attributes().Get("service.name") + pResults[name.AsString()] = append(pResults[name.AsString()], rms.At(i)) + } + } + return pResults +} diff --git a/pkg/promotel/internal/prometheusreceiver/testdata/config.yaml b/pkg/promotel/internal/prometheusreceiver/testdata/config.yaml new file mode 100644 index 0000000000..860ceb6e67 --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/testdata/config.yaml @@ -0,0 +1,25 @@ +prometheus: +prometheus/customname: + trim_metric_suffixes: true + use_start_time_metric: true + start_time_metric_regex: '^(.+_)*process_start_time_seconds$' + report_extra_scrape_metrics: true + target_allocator: + endpoint: http://my-targetallocator-service + interval: 30s + collector_id: collector-1 + # imported struct from the Prometheus code base. Can be used optionally to configure the jobs as seen in the docs + # https://prometheus.io/docs/prometheus/latest/configuration/configuration/#http_sd_config + http_sd_config: + refresh_interval: 60s + basic_auth: + username: "prometheus" + password: "changeme" + http_scrape_config: + basic_auth: + username: "scrape_prometheus" + password: "scrape_changeme" + config: + scrape_configs: + - job_name: 'demo' + scrape_interval: 5s diff --git a/pkg/promotel/internal/prometheusreceiver/testdata/config_scrape_config_files.yaml b/pkg/promotel/internal/prometheusreceiver/testdata/config_scrape_config_files.yaml new file mode 100644 index 0000000000..271a289334 --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/testdata/config_scrape_config_files.yaml @@ -0,0 +1,8 @@ +prometheus: + trim_metric_suffixes: true + use_start_time_metric: true + start_time_metric_regex: '^(.+_)*process_start_time_seconds$' + report_extra_scrape_metrics: true + config: + scrape_config_files: + - ./testdata/scrape-config.yaml diff --git a/pkg/promotel/internal/prometheusreceiver/testdata/config_sd.yaml b/pkg/promotel/internal/prometheusreceiver/testdata/config_sd.yaml new file mode 100644 index 0000000000..f8afd00e9e --- /dev/null +++ b/pkg/promotel/internal/prometheusreceiver/testdata/config_sd.yaml @@ -0,0 +1,70 @@ +prometheus: + config: + scrape_configs: + - job_name: file + file_sd_configs: + - files: + - './testdata/dummy.json' + - job_name: k8s + kubernetes_sd_configs: + - role: node + - job_name: ec2 + ec2_sd_configs: + - region: us-west-2 + - job_name: gce + gce_sd_configs: + - project: my-project + zone: my-zone + - job_name: dns + dns_sd_configs: + - names: + - name1 + - job_name: openstack + openstack_sd_configs: + - role: hypervisor + region: region + - job_name: hetzner + hetzner_sd_configs: + - role: robot + - job_name: marathon + marathon_sd_configs: + - servers: + - server1 + - job_name: nerve + nerve_sd_configs: + - servers: + - server1 + paths: + - /path1 + - job_name: serverset + serverset_sd_configs: + - servers: + - server1 + paths: + - /path1 + - job_name: triton + triton_sd_configs: + - account: account + dns_suffix: suffix + endpoint: endpoint + - job_name: eureka + eureka_sd_configs: + - server: http://server1 + - job_name: azure + azure_sd_configs: + - subscription_id: subscription + tenant_id: tenant + client_id: client + client_secret: secret + - job_name: consul + consul_sd_configs: + - server: server1 + - job_name: digitalocean + digitalocean_sd_configs: + - basic_auth: + username: username + password: password + - job_name: dockerswarm_sd_config + 
dockerswarm_sd_configs:
+        - host: host
+          role: nodes
diff --git a/pkg/promotel/internal/prometheusreceiver/testdata/invalid-config-prometheus-section.yaml b/pkg/promotel/internal/prometheusreceiver/testdata/invalid-config-prometheus-section.yaml
new file mode 100644
index 0000000000..08b36c4ed9
--- /dev/null
+++ b/pkg/promotel/internal/prometheusreceiver/testdata/invalid-config-prometheus-section.yaml
@@ -0,0 +1,6 @@
+prometheus:
+  config:
+    use_start_time_metric: true
+    scrape_configs:
+      - job_name: 'demo'
+        scrape_interval: 5s
diff --git a/pkg/promotel/internal/prometheusreceiver/testdata/invalid-config-section.yaml b/pkg/promotel/internal/prometheusreceiver/testdata/invalid-config-section.yaml
new file mode 100644
index 0000000000..da67b9d842
--- /dev/null
+++ b/pkg/promotel/internal/prometheusreceiver/testdata/invalid-config-section.yaml
@@ -0,0 +1,6 @@
+prometheus:
+  unknow_section: 1
+  config:
+    scrape_configs:
+      - job_name: 'demo'
+        scrape_interval: 5s
diff --git a/pkg/promotel/internal/settings.go b/pkg/promotel/internal/settings.go
new file mode 100644
index 0000000000..79f61f5fe5
--- /dev/null
+++ b/pkg/promotel/internal/settings.go
@@ -0,0 +1,29 @@
+package internal
+
+import (
+	"github.com/google/uuid"
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/exporter"
+	"go.opentelemetry.io/collector/receiver"
+)
+
+var defaultComponentType = component.MustNewType("nop")
+
+// NewReceiverSettings returns receiver settings for the factory.CreateMetrics function.
+func NewReceiverSettings(logger *zap.Logger) receiver.Settings {
+	return receiver.Settings{
+		ID:                component.NewIDWithName(defaultComponentType, uuid.NewString()),
+		TelemetrySettings: NewTelemetrySettings(logger),
+		BuildInfo:         component.NewDefaultBuildInfo(),
+	}
+}
+
+// NewExporterSettings returns exporter settings for the factory.CreateMetrics function.
+func NewExporterSettings(logger *zap.Logger) exporter.Settings {
+	return exporter.Settings{
+		ID:                component.NewIDWithName(defaultComponentType, uuid.NewString()),
+		TelemetrySettings: NewTelemetrySettings(logger),
+		BuildInfo:         component.NewDefaultBuildInfo(),
+	}
+}
diff --git a/pkg/promotel/internal/telemetry.go b/pkg/promotel/internal/telemetry.go
new file mode 100644
index 0000000000..936c83693a
--- /dev/null
+++ b/pkg/promotel/internal/telemetry.go
@@ -0,0 +1,30 @@
+package internal
+
+import (
+	"go.opentelemetry.io/otel/metric"
+	noopmetric "go.opentelemetry.io/otel/metric/noop"
+	nooptrace "go.opentelemetry.io/otel/trace/noop"
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configtelemetry"
+	"go.opentelemetry.io/collector/pdata/pcommon"
+)
+
+// NewTelemetrySettings returns telemetry settings for the Create* factory functions.
+func NewTelemetrySettings(logger *zap.Logger) component.TelemetrySettings { + l := zap.NewNop() + if logger != nil { + l = logger + } + return component.TelemetrySettings{ + Logger: l, + LeveledMeterProvider: func(_ configtelemetry.Level) metric.MeterProvider { + return noopmetric.NewMeterProvider() + }, + TracerProvider: nooptrace.NewTracerProvider(), + MeterProvider: noopmetric.NewMeterProvider(), + MetricsLevel: configtelemetry.LevelNone, + Resource: pcommon.NewResource(), + } +} diff --git a/pkg/promotel/internal/translator/prometheus/README.md b/pkg/promotel/internal/translator/prometheus/README.md new file mode 100644 index 0000000000..4e88796845 --- /dev/null +++ b/pkg/promotel/internal/translator/prometheus/README.md @@ -0,0 +1,115 @@ +# Prometheus Normalization + +[OpenTelemetry's metric semantic convention](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/README.md) is not compatible with [Prometheus' own metrics naming convention](https://prometheus.io/docs/practices/naming/). This module provides centralized functions to convert OpenTelemetry metrics to Prometheus-compliant metrics. These functions are used by the following components for Prometheus: + +* [prometheusreceiver](../../../receiver/prometheusreceiver/) +* [prometheusexporter](../../../exporter/prometheusexporter/) +* [prometheusremotewriteexporter](../../../exporter/prometheusremotewriteexporter/) + +## Metric name + +### Full normalization + +> **Warning** +> +> This feature can be controlled with [feature gate](https://github.com/open-telemetry/opentelemetry-collector/tree/main/featuregate) `pkg.translator.prometheus.NormalizeName`. It is currently enabled by default (beta stage). +> +> Example of how to disable it: +> ```shell-session +> $ otelcol --config=config.yaml --feature-gates=-pkg.translator.prometheus.NormalizeName +> ``` + +#### List of transformations to convert OpenTelemetry metrics to Prometheus metrics + +| Case | Transformation | Example | +|----------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| +| Unsupported characters and extraneous underscores | Replace unsupported characters with underscores (`_`). Drop redundant, leading and trailing underscores. 
| `(lambda).function.executions(#)` → `lambda_function_executions` |
+| Standard unit | Convert the unit from [Unified Code for Units of Measure](http://unitsofmeasure.org/ucum.html) to its Prometheus equivalent and append it | `system.filesystem.usage` with unit `By` → `system_filesystem_usage_bytes` |
+| Non-standard unit (unit is surrounded with `{}`) | Drop the unit | `system.network.dropped` with unit `{packets}` → `system_network_dropped` |
+| Non-standard unit (unit is **not** surrounded with `{}`) | Append the unit, if not already present, after sanitization (all non-alphanumeric chars are dropped) | `system.network.dropped` with unit `packets` → `system_network_dropped_packets` |
+| Percentages (unit is `1`) | Append `_ratio` (for gauges only) | `system.memory.utilization` with unit `1` → `system_memory_utilization_ratio` |
+| Percentages (unit is `%`) | Replace `%` with `percent` and append it | `storage.filesystem.utilization` with unit `%` → `storage_filesystem_utilization_percent` |
+| Rates (unit contains `/`) | Replace `/` with `per` | `astro.light.speed` with unit `m/s` → `astro_light_speed_meters_per_second` |
+| Counter | Append `_total` | `system.processes.created` → `system_processes_created_total` |
+
+List of standard OpenTelemetry units that will be translated to [Prometheus standard base units](https://prometheus.io/docs/practices/naming/#base-units):
+
+| OpenTelemetry Unit | Corresponding Prometheus Unit |
+| ------------------ | ----------------------------- |
+| **Time**           |                               |
+| `d`                | `days`                        |
+| `h`                | `hours`                       |
+| `min`              | `minutes`                     |
+| `s`                | `seconds`                     |
+| `ms`               | `milliseconds`                |
+| `us`               | `microseconds`                |
+| `ns`               | `nanoseconds`                 |
+| **Bytes**          |                               |
+| `By`               | `bytes`                       |
+| `KiBy`             | `kibibytes`                   |
+| `MiBy`             | `mebibytes`                   |
+| `GiBy`             | `gibibytes`                   |
+| `TiBy`             | `tibibytes`                   |
+| `KBy`              | `kilobytes`                   |
+| `MBy`              | `megabytes`                   |
+| `GBy`              | `gigabytes`                   |
+| `TBy`              | `terabytes`                   |
+| **SI Units**       |                               |
+| `m`                | `meters`                      |
+| `V`                | `volts`                       |
+| `A`                | `amperes`                     |
+| `J`                | `joules`                      |
+| `W`                | `watts`                       |
+| `g`                | `grams`                       |
+| **Misc.**          |                               |
+| `Cel`              | `celsius`                     |
+| `Hz`               | `hertz`                       |
+| `%`                | `percent`                     |
+
+> **Note**
+> Prometheus also recommends using base units (for example, bytes rather than kilobytes, and seconds rather than milliseconds), but these functions will not attempt to convert non-base units to base units.
+
+#### List of transformations performed to convert Prometheus metrics to OpenTelemetry metrics
+
+| Case | Transformation | Example |
+|------------------------------------|------------------------------------------------------------------------|---------------------------------------------------------------------------------|
+| UNIT defined in OpenMetrics format | Drop the unit suffix and set it in the OpenTelemetry metric unit field | `system_network_dropped_packets` → `system_network_dropped` with `packets` unit |
+| Counter | Drop the `_total` suffix | `system_processes_created_total` → `system_processes_created` |
+
+### Simple normalization
+
+If the `pkg.translator.prometheus.NormalizeName` feature gate is not enabled, only a simple sanitization of the OpenTelemetry metric name is performed to ensure it follows Prometheus naming conventions:
+
+* Replace unsupported characters with underscores (`_`)
+* Remove redundant, leading and trailing underscores
+* Ensure the metric name doesn't start with a digit by prefixing it with an underscore
+
+No processing of the unit is performed, and `_total` is not appended for *Counters*.
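+
+As a concrete (if minimal) sketch of the two modes, the snippet below builds a counter and runs it through this package's `BuildCompliantName`, first with and then without unit/type suffixes. The expected outputs follow from the tables above; note that this translator ships under `internal/` in this module, so the import path shown only compiles from inside this repository:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"go.opentelemetry.io/collector/pdata/pmetric"
+
+	prom "github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/translator/prometheus"
+)
+
+func main() {
+	// Build a monotonic sum ("counter") named system.io with the UCUM unit "By".
+	metrics := pmetric.NewMetrics()
+	scope := metrics.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty()
+	counter := scope.Metrics().AppendEmpty()
+	counter.SetEmptySum().SetIsMonotonic(true)
+	counter.SetName("system.io")
+	counter.SetUnit("By")
+
+	// Full normalization: the unit and the counter suffix are appended.
+	fmt.Println(prom.BuildCompliantName(counter, "", true)) // system_io_bytes_total
+
+	// Simple normalization: forbidden characters are replaced, nothing is appended.
+	fmt.Println(prom.BuildCompliantName(counter, "", false)) // system_io
+}
+```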
+
+## Labels
+
+OpenTelemetry *Attributes* are converted to Prometheus labels and normalized to follow the [Prometheus label naming rules](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels).
+
+The following transformations are performed on OpenTelemetry *Attributes* to produce Prometheus labels:
+
+* Replace unsupported characters with underscores (`_`)
+* Prefix the label with `key_` if it doesn't start with a letter, except if it's already prefixed with a double underscore (`__`)
+
+By default, labels that start with a single underscore (`_`) are also prefixed with `key`, even though the Prometheus label naming rules don't strictly require it. This behavior can be disabled with the `pkg.translator.prometheus.PermissiveLabelSanitization` feature gate, activated via the collector's feature gate option:
+
+```shell-session
+$ otelcol --config=config.yaml --feature-gates=pkg.translator.prometheus.PermissiveLabelSanitization
+```
+
+Examples:
+
+| OpenTelemetry Attribute | Prometheus Label |
+|---|---|
+| `name` | `name` |
+| `host.name` | `host_name` |
+| `host_name` | `host_name` |
+| `name (of the host)` | `name__of_the_host_` |
+| `2 cents` | `key_2_cents` |
+| `__name` | `__name` |
+| `_name` | `key_name` |
+| `_name` | `_name` (if `PermissiveLabelSanitization` is enabled) |
diff --git a/pkg/promotel/internal/translator/prometheus/constants.go b/pkg/promotel/internal/translator/prometheus/constants.go
new file mode 100644
index 0000000000..98c87b3e27
--- /dev/null
+++ b/pkg/promotel/internal/translator/prometheus/constants.go
@@ -0,0 +1,34 @@
+package prometheus
+
+const (
+	// MetricMetadataTypeKey is the key used to store the original Prometheus
+	// type in metric metadata:
+	// https://github.com/open-telemetry/opentelemetry-specification/blob/e6eccba97ebaffbbfad6d4358408a2cead0ec2df/specification/compatibility/prometheus_and_openmetrics.md#metric-metadata
+	MetricMetadataTypeKey = "prometheus.type"
+	// ExemplarTraceIDKey is the key used to store the trace ID in Prometheus
+	// exemplars:
+	// https://github.com/open-telemetry/opentelemetry-specification/blob/e6eccba97ebaffbbfad6d4358408a2cead0ec2df/specification/compatibility/prometheus_and_openmetrics.md#exemplars
+	ExemplarTraceIDKey = "trace_id"
+	// ExemplarSpanIDKey is the key used to store the Span ID in Prometheus
+	// exemplars:
+	// https://github.com/open-telemetry/opentelemetry-specification/blob/e6eccba97ebaffbbfad6d4358408a2cead0ec2df/specification/compatibility/prometheus_and_openmetrics.md#exemplars
+	ExemplarSpanIDKey = "span_id"
+	// ScopeInfoMetricName is the name of the metric used to preserve scope
+	// attributes in Prometheus format:
+	// https://github.com/open-telemetry/opentelemetry-specification/blob/e6eccba97ebaffbbfad6d4358408a2cead0ec2df/specification/compatibility/prometheus_and_openmetrics.md#instrumentation-scope
+	ScopeInfoMetricName = "otel_scope_info"
+	// ScopeNameLabelKey is the name of the label key used to identify the name
+	// of the OpenTelemetry scope which produced the metric:
+	// https://github.com/open-telemetry/opentelemetry-specification/blob/e6eccba97ebaffbbfad6d4358408a2cead0ec2df/specification/compatibility/prometheus_and_openmetrics.md#instrumentation-scope
+	ScopeNameLabelKey = "otel_scope_name"
+	// ScopeVersionLabelKey is the name of the label key used to identify the
+	// version of the OpenTelemetry scope which produced the metric:
+	// https://github.com/open-telemetry/opentelemetry-specification/blob/e6eccba97ebaffbbfad6d4358408a2cead0ec2df/specification/compatibility/prometheus_and_openmetrics.md#instrumentation-scope
+	ScopeVersionLabelKey = "otel_scope_version"
+	// TargetInfoMetricName is the name of the metric used to preserve resource
+	// attributes in Prometheus format:
+	// https://github.com/open-telemetry/opentelemetry-specification/blob/e6eccba97ebaffbbfad6d4358408a2cead0ec2df/specification/compatibility/prometheus_and_openmetrics.md#resource-attributes-1
+	// It originates from OpenMetrics:
+	// https://github.com/OpenObservability/OpenMetrics/blob/1386544931307dff279688f332890c31b6c5de36/specification/OpenMetrics.md#supporting-target-metadata-in-both-push-based-and-pull-based-systems
+	TargetInfoMetricName = "target_info"
+)
diff --git a/pkg/promotel/internal/translator/prometheus/metadata.yaml b/pkg/promotel/internal/translator/prometheus/metadata.yaml
new file mode 100644
index 0000000000..227ec93766
--- /dev/null
+++ b/pkg/promotel/internal/translator/prometheus/metadata.yaml
@@ -0,0 +1,3 @@
+status:
+  codeowners:
+    active: [dashpole, bertysentry, ArthurSens]
\ No newline at end of file
diff --git a/pkg/promotel/internal/translator/prometheus/normalize_label.go b/pkg/promotel/internal/translator/prometheus/normalize_label.go
new file mode 100644
index 0000000000..f9b095f4ce
--- /dev/null
+++ b/pkg/promotel/internal/translator/prometheus/normalize_label.go
@@ -0,0 +1,42 @@
+package prometheus
+
+import (
+	"strings"
+	"unicode"
+)
+
+var dropSanitizationGateEnabled = true
+
+// NormalizeLabel normalizes the given label name to follow the Prometheus
+// label naming standard.
+//
+// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
+//
+// Labels that start with a non-letter rune are prefixed with "key_".
+//
+// An exception is made for double underscores, which are allowed.
+func NormalizeLabel(label string) string {
+	// Trivial case
+	if len(label) == 0 {
+		return label
+	}
+
+	// Replace all non-alphanumeric runes with underscores
+	label = strings.Map(sanitizeRune, label)
+
+	// If label starts with a number, prepend with "key_"
+	if unicode.IsDigit(rune(label[0])) {
+		label = "key_" + label
+	} else if strings.HasPrefix(label, "_") && !strings.HasPrefix(label, "__") && !dropSanitizationGateEnabled {
+		label = "key" + label
+	}
+
+	return label
+}
+
+// sanitizeRune returns '_' for any rune that is not a letter or a digit.
+func sanitizeRune(r rune) rune {
+	if unicode.IsLetter(r) || unicode.IsDigit(r) {
+		return r
+	}
+	return '_'
+}
diff --git a/pkg/promotel/internal/translator/prometheus/normalize_label_test.go b/pkg/promotel/internal/translator/prometheus/normalize_label_test.go
new file mode 100644
index 0000000000..8c556f4be6
--- /dev/null
+++ b/pkg/promotel/internal/translator/prometheus/normalize_label_test.go
@@ -0,0 +1,30 @@
+package prometheus
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestSanitize(t *testing.T) {
+	dropSanitizationGateEnabled = false
+	defer func() { dropSanitizationGateEnabled = true }()
+
+	require.Equal(t, "", NormalizeLabel(""))
+	require.Equal(t, "key_test", NormalizeLabel("_test"))
+	require.Equal(t, "key_0test", NormalizeLabel("0test"))
+	require.Equal(t, "test", NormalizeLabel("test"))
+	require.Equal(t, "test__", NormalizeLabel("test_/"))
+	require.Equal(t, "__test", NormalizeLabel("__test"))
+}
+
+func TestSanitizeDropSanitization(t *testing.T) {
+	dropSanitizationGateEnabled = true
+	defer func() { dropSanitizationGateEnabled = true }()
+
require.Equal(t, "", NormalizeLabel("")) + require.Equal(t, "_test", NormalizeLabel("_test")) + require.Equal(t, "key_0test", NormalizeLabel("0test")) + require.Equal(t, "test", NormalizeLabel("test")) + require.Equal(t, "__test", NormalizeLabel("__test")) +} diff --git a/pkg/promotel/internal/translator/prometheus/normalize_name.go b/pkg/promotel/internal/translator/prometheus/normalize_name.go new file mode 100644 index 0000000000..319c5b693d --- /dev/null +++ b/pkg/promotel/internal/translator/prometheus/normalize_name.go @@ -0,0 +1,268 @@ +package prometheus + +import ( + "strings" + "unicode" + + "go.opentelemetry.io/collector/pdata/pmetric" +) + +// The map to translate OTLP units to Prometheus units +// OTLP metrics use the c/s notation as specified at https://ucum.org/ucum.html +// (See also https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/README.md#instrument-units) +// Prometheus best practices for units: https://prometheus.io/docs/practices/naming/#base-units +// OpenMetrics specification for units: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#units-and-base-units +var unitMap = map[string]string{ + // Time + "d": "days", + "h": "hours", + "min": "minutes", + "s": "seconds", + "ms": "milliseconds", + "us": "microseconds", + "ns": "nanoseconds", + + // Bytes + "By": "bytes", + "KiBy": "kibibytes", + "MiBy": "mebibytes", + "GiBy": "gibibytes", + "TiBy": "tibibytes", + "KBy": "kilobytes", + "MBy": "megabytes", + "GBy": "gigabytes", + "TBy": "terabytes", + + // SI + "m": "meters", + "V": "volts", + "A": "amperes", + "J": "joules", + "W": "watts", + "g": "grams", + + // Misc + "Cel": "celsius", + "Hz": "hertz", + "1": "", + "%": "percent", +} + +// The map that translates the "per" unit +// Example: s => per second (singular) +var perUnitMap = map[string]string{ + "s": "second", + "m": "minute", + "h": "hour", + "d": "day", + "w": "week", + "mo": "month", + "y": "year", +} + +var normalizeNameGateEnabled = true + +// BuildCompliantName builds a Prometheus-compliant metric name for the specified metric +// +// Metric name is prefixed with specified namespace and underscore (if any). +// Namespace is not cleaned up. Make sure specified namespace follows Prometheus +// naming convention. +// +// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels +// and https://prometheus.io/docs/practices/naming/#metric-and-label-naming +func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string { + var metricName string + + // Full normalization following standard Prometheus naming conventions + if addMetricSuffixes && normalizeNameGateEnabled { + return normalizeName(metric, namespace) + } + + // Simple case (no full normalization, no units, etc.), we simply trim out forbidden chars + metricName = RemovePromForbiddenRunes(metric.Name()) + + // Namespace? + if namespace != "" { + return namespace + "_" + metricName + } + + // Metric name starts with a digit? 
Prefix it with an underscore + if metricName != "" && unicode.IsDigit(rune(metricName[0])) { + metricName = "_" + metricName + } + + return metricName +} + +// Build a normalized name for the specified metric +func normalizeName(metric pmetric.Metric, namespace string) string { + // Split metric name in "tokens" (remove all non-alphanumeric) + nameTokens := strings.FieldsFunc( + metric.Name(), + func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) }, + ) + + // Split unit at the '/' if any + unitTokens := strings.SplitN(metric.Unit(), "/", 2) + + // Main unit + // Append if not blank, doesn't contain '{}', and is not present in metric name already + if len(unitTokens) > 0 { + mainUnitOtel := strings.TrimSpace(unitTokens[0]) + if mainUnitOtel != "" && !strings.ContainsAny(mainUnitOtel, "{}") { + mainUnitProm := CleanUpString(unitMapGetOrDefault(mainUnitOtel)) + if mainUnitProm != "" && !contains(nameTokens, mainUnitProm) { + nameTokens = append(nameTokens, mainUnitProm) + } + } + + // Per unit + // Append if not blank, doesn't contain '{}', and is not present in metric name already + if len(unitTokens) > 1 && unitTokens[1] != "" { + perUnitOtel := strings.TrimSpace(unitTokens[1]) + if perUnitOtel != "" && !strings.ContainsAny(perUnitOtel, "{}") { + perUnitProm := CleanUpString(perUnitMapGetOrDefault(perUnitOtel)) + if perUnitProm != "" && !contains(nameTokens, perUnitProm) { + nameTokens = append(append(nameTokens, "per"), perUnitProm) + } + } + } + } + + // Append _total for Counters + if metric.Type() == pmetric.MetricTypeSum && metric.Sum().IsMonotonic() { + nameTokens = append(removeItem(nameTokens, "total"), "total") + } + + // Append _ratio for metrics with unit "1" + // Some Otel receivers improperly use unit "1" for counters of objects + // See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions + // Until these issues have been fixed, we're appending `_ratio` for gauges ONLY + // Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons) + if metric.Unit() == "1" && metric.Type() == pmetric.MetricTypeGauge { + nameTokens = append(removeItem(nameTokens, "ratio"), "ratio") + } + + // Namespace? + if namespace != "" { + nameTokens = append([]string{namespace}, nameTokens...) + } + + // Build the string from the tokens, separated with underscores + normalizedName := strings.Join(nameTokens, "_") + + // Metric name cannot start with a digit, so prefix it with "_" in this case + if normalizedName != "" && unicode.IsDigit(rune(normalizedName[0])) { + normalizedName = "_" + normalizedName + } + + return normalizedName +} + +// TrimPromSuffixes trims type and unit prometheus suffixes from a metric name. +// Following the [OpenTelemetry specs] for converting Prometheus Metric points to OTLP. 
+// +// [OpenTelemetry specs]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#metric-metadata +func TrimPromSuffixes(promName string, metricType pmetric.MetricType, unit string) string { + nameTokens := strings.Split(promName, "_") + if len(nameTokens) == 1 { + return promName + } + + nameTokens = removeTypeSuffixes(nameTokens, metricType) + nameTokens = removeUnitSuffixes(nameTokens, unit) + + return strings.Join(nameTokens, "_") +} + +func removeTypeSuffixes(tokens []string, metricType pmetric.MetricType) []string { + switch metricType { + case pmetric.MetricTypeSum: + // Only counters are expected to have a type suffix at this point. + // for other types, suffixes are removed during scrape. + return removeSuffix(tokens, "total") + default: + return tokens + } +} + +func removeUnitSuffixes(nameTokens []string, unit string) []string { + l := len(nameTokens) + unitTokens := strings.Split(unit, "_") + lu := len(unitTokens) + + if lu == 0 || l <= lu { + return nameTokens + } + + suffixed := true + for i := range unitTokens { + if nameTokens[l-i-1] != unitTokens[lu-i-1] { + suffixed = false + break + } + } + + if suffixed { + return nameTokens[:l-lu] + } + + return nameTokens +} + +func removeSuffix(tokens []string, suffix string) []string { + l := len(tokens) + if tokens[l-1] == suffix { + return tokens[:l-1] + } + + return tokens +} + +// Clean up specified string so it's Prometheus compliant +func CleanUpString(s string) string { + return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) }), "_") +} + +func RemovePromForbiddenRunes(s string) string { + return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != '_' && r != ':' }), "_") +} + +// Retrieve the Prometheus "basic" unit corresponding to the specified "basic" unit +// Returns the specified unit if not found in unitMap +func unitMapGetOrDefault(unit string) string { + if promUnit, ok := unitMap[unit]; ok { + return promUnit + } + return unit +} + +// Retrieve the Prometheus "per" unit corresponding to the specified "per" unit +// Returns the specified unit if not found in perUnitMap +func perUnitMapGetOrDefault(perUnit string) string { + if promPerUnit, ok := perUnitMap[perUnit]; ok { + return promPerUnit + } + return perUnit +} + +// Returns whether the slice contains the specified value +func contains(slice []string, value string) bool { + for _, sliceEntry := range slice { + if sliceEntry == value { + return true + } + } + return false +} + +// Remove the specified value from the slice +func removeItem(slice []string, value string) []string { + newSlice := make([]string, 0, len(slice)) + for _, sliceEntry := range slice { + if sliceEntry != value { + newSlice = append(newSlice, sliceEntry) + } + } + return newSlice +} diff --git a/pkg/promotel/internal/translator/prometheus/normalize_name_test.go b/pkg/promotel/internal/translator/prometheus/normalize_name_test.go new file mode 100644 index 0000000000..86510b03fc --- /dev/null +++ b/pkg/promotel/internal/translator/prometheus/normalize_name_test.go @@ -0,0 +1,201 @@ +package prometheus + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +func TestByte(t *testing.T) { + require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("system.filesystem.usage", "By"), "")) +} + +func 
TestByteCounter(t *testing.T) { + require.Equal(t, "system_io_bytes_total", normalizeName(createCounter("system.io", "By"), "")) + require.Equal(t, "network_transmitted_bytes_total", normalizeName(createCounter("network_transmitted_bytes_total", "By"), "")) +} + +func TestWhiteSpaces(t *testing.T) { + require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("\t system.filesystem.usage ", " By\t"), "")) +} + +func TestNonStandardUnit(t *testing.T) { + require.Equal(t, "system_network_dropped", normalizeName(createGauge("system.network.dropped", "{packets}"), "")) +} + +func TestNonStandardUnitCounter(t *testing.T) { + require.Equal(t, "system_network_dropped_total", normalizeName(createCounter("system.network.dropped", "{packets}"), "")) +} + +func TestBrokenUnit(t *testing.T) { + require.Equal(t, "system_network_dropped_packets", normalizeName(createGauge("system.network.dropped", "packets"), "")) + require.Equal(t, "system_network_packets_dropped", normalizeName(createGauge("system.network.packets.dropped", "packets"), "")) + require.Equal(t, "system_network_packets", normalizeName(createGauge("system.network.packets", "packets"), "")) +} + +func TestBrokenUnitCounter(t *testing.T) { + require.Equal(t, "system_network_dropped_packets_total", normalizeName(createCounter("system.network.dropped", "packets"), "")) + require.Equal(t, "system_network_packets_dropped_total", normalizeName(createCounter("system.network.packets.dropped", "packets"), "")) + require.Equal(t, "system_network_packets_total", normalizeName(createCounter("system.network.packets", "packets"), "")) +} + +func TestRatio(t *testing.T) { + require.Equal(t, "hw_gpu_memory_utilization_ratio", normalizeName(createGauge("hw.gpu.memory.utilization", "1"), "")) + require.Equal(t, "hw_fan_speed_ratio", normalizeName(createGauge("hw.fan.speed_ratio", "1"), "")) + require.Equal(t, "objects_total", normalizeName(createCounter("objects", "1"), "")) +} + +func TestHertz(t *testing.T) { + require.Equal(t, "hw_cpu_speed_limit_hertz", normalizeName(createGauge("hw.cpu.speed_limit", "Hz"), "")) +} + +func TestPer(t *testing.T) { + require.Equal(t, "broken_metric_speed_km_per_hour", normalizeName(createGauge("broken.metric.speed", "km/h"), "")) + require.Equal(t, "astro_light_speed_limit_meters_per_second", normalizeName(createGauge("astro.light.speed_limit", "m/s"), "")) +} + +func TestPercent(t *testing.T) { + require.Equal(t, "broken_metric_success_ratio_percent", normalizeName(createGauge("broken.metric.success_ratio", "%"), "")) + require.Equal(t, "broken_metric_success_percent", normalizeName(createGauge("broken.metric.success_percent", "%"), "")) +} + +func TestEmpty(t *testing.T) { + require.Equal(t, "test_metric_no_unit", normalizeName(createGauge("test.metric.no_unit", ""), "")) + require.Equal(t, "test_metric_spaces", normalizeName(createGauge("test.metric.spaces", " \t "), "")) +} + +func TestUnsupportedRunes(t *testing.T) { + require.Equal(t, "unsupported_metric_temperature_F", normalizeName(createGauge("unsupported.metric.temperature", "°F"), "")) + require.Equal(t, "unsupported_metric_weird", normalizeName(createGauge("unsupported.metric.weird", "+=.:,!* & #"), "")) + require.Equal(t, "unsupported_metric_redundant_test_per_C", normalizeName(createGauge("unsupported.metric.redundant", "__test $/°C"), "")) +} + +func TestOtelReceivers(t *testing.T) { + require.Equal(t, "active_directory_ds_replication_network_io_bytes_total", normalizeName(createCounter("active_directory.ds.replication.network.io", "By"), 
"")) + require.Equal(t, "active_directory_ds_replication_sync_object_pending_total", normalizeName(createCounter("active_directory.ds.replication.sync.object.pending", "{objects}"), "")) + require.Equal(t, "active_directory_ds_replication_object_rate_per_second", normalizeName(createGauge("active_directory.ds.replication.object.rate", "{objects}/s"), "")) + require.Equal(t, "active_directory_ds_name_cache_hit_rate_percent", normalizeName(createGauge("active_directory.ds.name_cache.hit_rate", "%"), "")) + require.Equal(t, "active_directory_ds_ldap_bind_last_successful_time_milliseconds", normalizeName(createGauge("active_directory.ds.ldap.bind.last_successful.time", "ms"), "")) + require.Equal(t, "apache_current_connections", normalizeName(createGauge("apache.current_connections", "connections"), "")) + require.Equal(t, "apache_workers_connections", normalizeName(createGauge("apache.workers", "connections"), "")) + require.Equal(t, "apache_requests_total", normalizeName(createCounter("apache.requests", "1"), "")) + require.Equal(t, "bigip_virtual_server_request_count_total", normalizeName(createCounter("bigip.virtual_server.request.count", "{requests}"), "")) + require.Equal(t, "system_cpu_utilization_ratio", normalizeName(createGauge("system.cpu.utilization", "1"), "")) + require.Equal(t, "system_disk_operation_time_seconds_total", normalizeName(createCounter("system.disk.operation_time", "s"), "")) + require.Equal(t, "system_cpu_load_average_15m_ratio", normalizeName(createGauge("system.cpu.load_average.15m", "1"), "")) + require.Equal(t, "memcached_operation_hit_ratio_percent", normalizeName(createGauge("memcached.operation_hit_ratio", "%"), "")) + require.Equal(t, "mongodbatlas_process_asserts_per_second", normalizeName(createGauge("mongodbatlas.process.asserts", "{assertions}/s"), "")) + require.Equal(t, "mongodbatlas_process_journaling_data_files_mebibytes", normalizeName(createGauge("mongodbatlas.process.journaling.data_files", "MiBy"), "")) + require.Equal(t, "mongodbatlas_process_network_io_bytes_per_second", normalizeName(createGauge("mongodbatlas.process.network.io", "By/s"), "")) + require.Equal(t, "mongodbatlas_process_oplog_rate_gibibytes_per_hour", normalizeName(createGauge("mongodbatlas.process.oplog.rate", "GiBy/h"), "")) + require.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", normalizeName(createGauge("mongodbatlas.process.db.query_targeting.scanned_per_returned", "{scanned}/{returned}"), "")) + require.Equal(t, "nginx_requests", normalizeName(createGauge("nginx.requests", "requests"), "")) + require.Equal(t, "nginx_connections_accepted", normalizeName(createGauge("nginx.connections_accepted", "connections"), "")) + require.Equal(t, "nsxt_node_memory_usage_kilobytes", normalizeName(createGauge("nsxt.node.memory.usage", "KBy"), "")) + require.Equal(t, "redis_latest_fork_microseconds", normalizeName(createGauge("redis.latest_fork", "us"), "")) +} + +func TestTrimPromSuffixes(t *testing.T) { + assert.Equal(t, "active_directory_ds_replication_network_io", TrimPromSuffixes("active_directory_ds_replication_network_io_bytes_total", pmetric.MetricTypeSum, "bytes")) + assert.Equal(t, "active_directory_ds_name_cache_hit_rate", TrimPromSuffixes("active_directory_ds_name_cache_hit_rate_percent", pmetric.MetricTypeGauge, "percent")) + assert.Equal(t, "active_directory_ds_ldap_bind_last_successful_time", TrimPromSuffixes("active_directory_ds_ldap_bind_last_successful_time_milliseconds", pmetric.MetricTypeGauge, "milliseconds")) + assert.Equal(t, 
"apache_requests", TrimPromSuffixes("apache_requests_total", pmetric.MetricTypeSum, "1")) + assert.Equal(t, "system_cpu_utilization", TrimPromSuffixes("system_cpu_utilization_ratio", pmetric.MetricTypeGauge, "ratio")) + assert.Equal(t, "mongodbatlas_process_journaling_data_files", TrimPromSuffixes("mongodbatlas_process_journaling_data_files_mebibytes", pmetric.MetricTypeGauge, "mebibytes")) + assert.Equal(t, "mongodbatlas_process_network_io", TrimPromSuffixes("mongodbatlas_process_network_io_bytes_per_second", pmetric.MetricTypeGauge, "bytes_per_second")) + assert.Equal(t, "mongodbatlas_process_oplog_rate", TrimPromSuffixes("mongodbatlas_process_oplog_rate_gibibytes_per_hour", pmetric.MetricTypeGauge, "gibibytes_per_hour")) + assert.Equal(t, "nsxt_node_memory_usage", TrimPromSuffixes("nsxt_node_memory_usage_kilobytes", pmetric.MetricTypeGauge, "kilobytes")) + assert.Equal(t, "redis_latest_fork", TrimPromSuffixes("redis_latest_fork_microseconds", pmetric.MetricTypeGauge, "microseconds")) + assert.Equal(t, "up", TrimPromSuffixes("up", pmetric.MetricTypeGauge, "")) + + // These are not necessarily valid OM units, only tested for the sake of completeness. + assert.Equal(t, "active_directory_ds_replication_sync_object_pending", TrimPromSuffixes("active_directory_ds_replication_sync_object_pending_total", pmetric.MetricTypeSum, "{objects}")) + assert.Equal(t, "apache_current", TrimPromSuffixes("apache_current_connections", pmetric.MetricTypeGauge, "connections")) + assert.Equal(t, "bigip_virtual_server_request_count", TrimPromSuffixes("bigip_virtual_server_request_count_total", pmetric.MetricTypeSum, "{requests}")) + assert.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", TrimPromSuffixes("mongodbatlas_process_db_query_targeting_scanned_per_returned", pmetric.MetricTypeGauge, "{scanned}/{returned}")) + assert.Equal(t, "nginx_connections_accepted", TrimPromSuffixes("nginx_connections_accepted", pmetric.MetricTypeGauge, "connections")) + assert.Equal(t, "apache_workers", TrimPromSuffixes("apache_workers_connections", pmetric.MetricTypeGauge, "connections")) + assert.Equal(t, "nginx", TrimPromSuffixes("nginx_requests", pmetric.MetricTypeGauge, "requests")) + + // Units shouldn't be trimmed if the unit is not a direct match with the suffix, i.e, a suffix "_seconds" shouldn't be removed if unit is "sec" or "s" + assert.Equal(t, "system_cpu_load_average_15m_ratio", TrimPromSuffixes("system_cpu_load_average_15m_ratio", pmetric.MetricTypeGauge, "1")) + assert.Equal(t, "mongodbatlas_process_asserts_per_second", TrimPromSuffixes("mongodbatlas_process_asserts_per_second", pmetric.MetricTypeGauge, "{assertions}/s")) + assert.Equal(t, "memcached_operation_hit_ratio_percent", TrimPromSuffixes("memcached_operation_hit_ratio_percent", pmetric.MetricTypeGauge, "%")) + assert.Equal(t, "active_directory_ds_replication_object_rate_per_second", TrimPromSuffixes("active_directory_ds_replication_object_rate_per_second", pmetric.MetricTypeGauge, "{objects}/s")) + assert.Equal(t, "system_disk_operation_time_seconds", TrimPromSuffixes("system_disk_operation_time_seconds_total", pmetric.MetricTypeSum, "s")) +} + +func TestNamespace(t *testing.T) { + require.Equal(t, "space_test", normalizeName(createGauge("test", ""), "space")) + require.Equal(t, "space_test", normalizeName(createGauge("#test", ""), "space")) +} + +func TestCleanUpString(t *testing.T) { + require.Equal(t, "", CleanUpString("")) + require.Equal(t, "a_b", CleanUpString("a b")) + require.Equal(t, "hello_world", CleanUpString("hello, 
world!")) + require.Equal(t, "hello_you_2", CleanUpString("hello you 2")) + require.Equal(t, "1000", CleanUpString("$1000")) + require.Equal(t, "", CleanUpString("*+$^=)")) +} + +func TestUnitMapGetOrDefault(t *testing.T) { + require.Equal(t, "", unitMapGetOrDefault("")) + require.Equal(t, "seconds", unitMapGetOrDefault("s")) + require.Equal(t, "invalid", unitMapGetOrDefault("invalid")) +} + +func TestPerUnitMapGetOrDefault(t *testing.T) { + require.Equal(t, "", perUnitMapGetOrDefault("")) + require.Equal(t, "second", perUnitMapGetOrDefault("s")) + require.Equal(t, "invalid", perUnitMapGetOrDefault("invalid")) +} + +func TestRemoveItem(t *testing.T) { + require.Equal(t, []string{}, removeItem([]string{}, "test")) + require.Equal(t, []string{}, removeItem([]string{}, "")) + require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, "d")) + require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, "")) + require.Equal(t, []string{"a", "b"}, removeItem([]string{"a", "b", "c"}, "c")) + require.Equal(t, []string{"a", "c"}, removeItem([]string{"a", "b", "c"}, "b")) + require.Equal(t, []string{"b", "c"}, removeItem([]string{"a", "b", "c"}, "a")) +} + +func TestBuildCompliantNameWithNormalize(t *testing.T) { + defer func() { normalizeNameGateEnabled = true }() + addUnitAndTypeSuffixes := true + require.Equal(t, "system_io_bytes_total", BuildCompliantName(createCounter("system.io", "By"), "", addUnitAndTypeSuffixes)) + require.Equal(t, "system_network_io_bytes_total", BuildCompliantName(createCounter("network.io", "By"), "system", addUnitAndTypeSuffixes)) + require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", ""), "", addUnitAndTypeSuffixes)) + require.Equal(t, "envoy_rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", addUnitAndTypeSuffixes)) + require.Equal(t, "foo_bar", BuildCompliantName(createGauge(":foo::bar", ""), "", addUnitAndTypeSuffixes)) + require.Equal(t, "foo_bar_total", BuildCompliantName(createCounter(":foo::bar", ""), "", addUnitAndTypeSuffixes)) +} + +func TestBuildCompliantNameWithSuffixesFeatureGateDisabled(t *testing.T) { + normalizeNameGateEnabled = false + defer func() { normalizeNameGateEnabled = true }() + addUnitAndTypeSuffixes := true + require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "By"), "", addUnitAndTypeSuffixes)) + require.Equal(t, "system_network_io", BuildCompliantName(createCounter("network.io", "By"), "system", addUnitAndTypeSuffixes)) + require.Equal(t, "system_network_I_O", BuildCompliantName(createCounter("network (I/O)", "By"), "system", addUnitAndTypeSuffixes)) + require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", "By"), "", addUnitAndTypeSuffixes)) + require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", addUnitAndTypeSuffixes)) + require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", addUnitAndTypeSuffixes)) + require.Equal(t, ":foo::bar", BuildCompliantName(createCounter(":foo::bar", ""), "", addUnitAndTypeSuffixes)) +} + +func TestBuildCompliantNameWithoutSuffixes(t *testing.T) { + normalizeNameGateEnabled = false + defer func() { normalizeNameGateEnabled = true }() + addUnitAndTypeSuffixes := false + require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "By"), "", addUnitAndTypeSuffixes)) + require.Equal(t, "system_network_io", 
BuildCompliantName(createCounter("network.io", "By"), "system", addUnitAndTypeSuffixes)) + require.Equal(t, "system_network_I_O", BuildCompliantName(createCounter("network (I/O)", "By"), "system", addUnitAndTypeSuffixes)) + require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", "By"), "", addUnitAndTypeSuffixes)) + require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", addUnitAndTypeSuffixes)) + require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", addUnitAndTypeSuffixes)) + require.Equal(t, ":foo::bar", BuildCompliantName(createCounter(":foo::bar", ""), "", addUnitAndTypeSuffixes)) +} diff --git a/pkg/promotel/internal/translator/prometheus/package_test.go b/pkg/promotel/internal/translator/prometheus/package_test.go new file mode 100644 index 0000000000..3c5d1d8ac0 --- /dev/null +++ b/pkg/promotel/internal/translator/prometheus/package_test.go @@ -0,0 +1,11 @@ +package prometheus + +import ( + "testing" + + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} diff --git a/pkg/promotel/internal/translator/prometheus/testutils_test.go b/pkg/promotel/internal/translator/prometheus/testutils_test.go new file mode 100644 index 0000000000..25db55245d --- /dev/null +++ b/pkg/promotel/internal/translator/prometheus/testutils_test.go @@ -0,0 +1,31 @@ +package prometheus + +import ( + "go.opentelemetry.io/collector/pdata/pmetric" +) + +var ilm pmetric.ScopeMetrics + +func init() { + metrics := pmetric.NewMetrics() + resourceMetrics := metrics.ResourceMetrics().AppendEmpty() + ilm = resourceMetrics.ScopeMetrics().AppendEmpty() +} + +// Returns a new Metric of type "Gauge" with specified name and unit +func createGauge(name string, unit string) pmetric.Metric { + gauge := ilm.Metrics().AppendEmpty() + gauge.SetName(name) + gauge.SetUnit(unit) + gauge.SetEmptyGauge() + return gauge +} + +// Returns a new Metric of type Monotonic Sum with specified name and unit +func createCounter(name string, unit string) pmetric.Metric { + counter := ilm.Metrics().AppendEmpty() + counter.SetEmptySum().SetIsMonotonic(true) + counter.SetName(name) + counter.SetUnit(unit) + return counter +} diff --git a/pkg/promotel/internal/translator/prometheus/unit_to_ucum.go b/pkg/promotel/internal/translator/prometheus/unit_to_ucum.go new file mode 100644 index 0000000000..6f75a33434 --- /dev/null +++ b/pkg/promotel/internal/translator/prometheus/unit_to_ucum.go @@ -0,0 +1,86 @@ +package prometheus + +import "strings" + +var wordToUCUM = map[string]string{ + // Time + "days": "d", + "hours": "h", + "minutes": "min", + "seconds": "s", + "milliseconds": "ms", + "microseconds": "us", + "nanoseconds": "ns", + + // Bytes + "bytes": "By", + "kibibytes": "KiBy", + "mebibytes": "MiBy", + "gibibytes": "GiBy", + "tibibytes": "TiBy", + "kilobytes": "KBy", + "megabytes": "MBy", + "gigabytes": "GBy", + "terabytes": "TBy", + + // SI + "meters": "m", + "volts": "V", + "amperes": "A", + "joules": "J", + "watts": "W", + "grams": "g", + + // Misc + "celsius": "Cel", + "hertz": "Hz", + "ratio": "1", + "percent": "%", +} + +// The map that translates the "per" unit +// Example: per_second (singular) => /s +var perWordToUCUM = map[string]string{ + "second": "s", + "minute": "m", + "hour": "h", + "day": "d", + "week": "w", + "month": "mo", + "year": "y", +} + +// UnitWordToUCUM converts english unit words to UCUM units: +// https://ucum.org/ucum#section-Alphabetic-Index-By-Symbol +// It 
also handles rates, such as meters_per_second, by translating the first
+// word to UCUM and the "per" word to UCUM, joining them with a "/".
+func UnitWordToUCUM(unit string) string {
+	unitTokens := strings.SplitN(unit, "_per_", 2)
+	if len(unitTokens) == 0 {
+		return ""
+	}
+	ucumUnit := wordToUCUMOrDefault(unitTokens[0])
+	if len(unitTokens) > 1 && unitTokens[1] != "" {
+		ucumUnit += "/" + perWordToUCUMOrDefault(unitTokens[1])
+	}
+	return ucumUnit
+}
+
+// wordToUCUMOrDefault retrieves the UCUM unit corresponding to the specified
+// unit word. Returns the specified unit if not found in wordToUCUM.
+func wordToUCUMOrDefault(unit string) string {
+	if promUnit, ok := wordToUCUM[unit]; ok {
+		return promUnit
+	}
+	return unit
+}
+
+// perWordToUCUMOrDefault retrieves the UCUM "per" unit corresponding to the
+// specified "per" unit word. Returns the specified unit if not found in perWordToUCUM.
+func perWordToUCUMOrDefault(perUnit string) string {
+	if promPerUnit, ok := perWordToUCUM[perUnit]; ok {
+		return promPerUnit
+	}
+	return perUnit
+}
diff --git a/pkg/promotel/internal/translator/prometheus/unit_to_ucum_test.go b/pkg/promotel/internal/translator/prometheus/unit_to_ucum_test.go
new file mode 100644
index 0000000000..3301a685cd
--- /dev/null
+++ b/pkg/promotel/internal/translator/prometheus/unit_to_ucum_test.go
@@ -0,0 +1,57 @@
+package prometheus
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestUnitWordToUCUM(t *testing.T) {
+	for _, tc := range []struct {
+		input    string
+		expected string
+	}{
+		{
+			input:    "",
+			expected: "",
+		},
+		{
+			input:    "days",
+			expected: "d",
+		},
+		{
+			input:    "seconds",
+			expected: "s",
+		},
+		{
+			input:    "kibibytes",
+			expected: "KiBy",
+		},
+		{
+			input:    "volts",
+			expected: "V",
+		},
+		{
+			input:    "bananas_per_day",
+			expected: "bananas/d",
+		},
+		{
+			input:    "meters_per_hour",
+			expected: "m/h",
+		},
+		{
+			input:    "ratio",
+			expected: "1",
+		},
+		{
+			input:    "percent",
+			expected: "%",
+		},
+	} {
+		t.Run(fmt.Sprintf("input: \"%v\"", tc.input), func(t *testing.T) {
+			got := UnitWordToUCUM(tc.input)
+			assert.Equal(t, tc.expected, got)
+		})
+	}
+}
diff --git a/pkg/promotel/promotel_test.go b/pkg/promotel/promotel_test.go
new file mode 100644
index 0000000000..222e190d2e
--- /dev/null
+++ b/pkg/promotel/promotel_test.go
@@ -0,0 +1,226 @@
+package promotel_test
+
+import (
+	"context"
+	"fmt"
+	"math/rand"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/metadata"
+	"github.com/prometheus/prometheus/storage"
+
+	"github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheus/scrape"
+)
+
+// TestScrapeLoopScrapeAndReport exercises ScrapeAndReport: it first scrapes an
+// empty registry, then registers a counter and verifies that the gathered
+// sample is appended and committed.
+func TestScrapeLoopScrapeAndReport(t *testing.T) {
+	appendable := &collectResultAppendable{&testAppender{}}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	reg := prometheus.NewRegistry()
+	sl, err := scrape.NewGathererLoop(ctx, nil, appendable, reg, reg, 10*time.Millisecond)
+	require.NoError(t, err)
+
+	start := time.Now()
+	sl.ScrapeAndReport(time.Time{}, start, nil)
+	// The collectResultAppendable holds all appended samples.
+	allSamples := appendable.resultFloats
+	// We expect at least one normal sample plus the reported samples.
+	require.NotEmpty(t, allSamples, "Expected to see appended samples.")
+
+	// Reset the appender.
+	appendable.testAppender = &testAppender{}
+	// Create a counter metric.
+	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
+		Name: "metric_a",
+		Help: "metric_a help",
+	}, []string{"label_a"})
+	reg.MustRegister(counter)
+	counter.WithLabelValues("value_a").Add(42)
+
+	mfs, err := reg.Gather()
+	require.NoError(t, err)
+	// Verify that metric_a is present in the Gatherer results.
+	var foundMetric bool
+	for _, mf := range mfs {
+		if mf.GetName() == "metric_a" {
+			// Verify the metric's value.
+			require.Len(t, mf.GetMetric(), 1)
+			require.Equal(t, "value_a", mf.GetMetric()[0].GetLabel()[0].GetValue())
+			require.Equal(t, 42.0, mf.GetMetric()[0].GetCounter().GetValue()) // nolint
+			foundMetric = true
+			break
+		}
+	}
+	require.True(t, foundMetric, "Expected to see the 'metric_a' counter metric.")
+
+	sl.ScrapeAndReport(time.Time{}, start, nil)
+	// Get all appended samples.
+	allSamples = appendable.resultFloats
+	// Verify that the counter metric 'metric_a' was reported.
+	var found bool
+	for _, s := range allSamples {
+		if s.metric.Get("__name__") == "metric_a" && s.metric.Get("label_a") == "value_a" {
+			found = true
+			require.Equal(t, 42.0, s.f) // nolint
+		}
+	}
+	require.True(t, found, "Expected to see the 'metric_a' counter metric.")
+}
+
+type floatSample struct {
+	metric labels.Labels
+	t      int64
+	f      float64
+}
+
+type histogramSample struct {
+	t  int64
+	h  *histogram.Histogram
+	fh *histogram.FloatHistogram
+}
+
+type collectResultAppendable struct {
+	*testAppender
+}
+
+func (a *collectResultAppendable) Appender(_ context.Context) storage.Appender {
+	return a
+}
+
+// testAppender records all samples that were added through the appender.
+// It can be used as its zero value or be backed by another appender that it
+// writes samples through.
+type testAppender struct { + mtx sync.Mutex + + next storage.Appender + resultFloats []floatSample + pendingFloats []floatSample + rolledbackFloats []floatSample + resultHistograms []histogramSample + pendingHistograms []histogramSample + rolledbackHistograms []histogramSample + resultExemplars []exemplar.Exemplar + pendingExemplars []exemplar.Exemplar + resultMetadata []metadata.Metadata + pendingMetadata []metadata.Metadata +} + +func (a *testAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { + a.mtx.Lock() + defer a.mtx.Unlock() + a.pendingFloats = append(a.pendingFloats, floatSample{ + metric: lset, + t: t, + f: v, + }) + + if ref == 0 { + ref = storage.SeriesRef(rand.Uint64()) + } + if a.next == nil { + return ref, nil + } + + ref, err := a.next.Append(ref, lset, t, v) + if err != nil { + return 0, err + } + return ref, err +} + +func (a *testAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { + a.mtx.Lock() + defer a.mtx.Unlock() + a.pendingExemplars = append(a.pendingExemplars, e) + if a.next == nil { + return 0, nil + } + + return a.next.AppendExemplar(ref, l, e) +} + +func (a *testAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + a.mtx.Lock() + defer a.mtx.Unlock() + a.pendingHistograms = append(a.pendingHistograms, histogramSample{h: h, fh: fh, t: t}) + if a.next == nil { + return 0, nil + } + + return a.next.AppendHistogram(ref, l, t, h, fh) +} + +func (a *testAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { + a.mtx.Lock() + defer a.mtx.Unlock() + a.pendingMetadata = append(a.pendingMetadata, m) + if ref == 0 { + ref = storage.SeriesRef(rand.Uint64()) + } + if a.next == nil { + return ref, nil + } + + return a.next.UpdateMetadata(ref, l, m) +} + +func (a *testAppender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) { + return a.Append(ref, l, ct, 0.0) +} + +func (a *testAppender) Commit() error { + a.mtx.Lock() + defer a.mtx.Unlock() + a.resultFloats = append(a.resultFloats, a.pendingFloats...) + a.resultExemplars = append(a.resultExemplars, a.pendingExemplars...) + a.resultHistograms = append(a.resultHistograms, a.pendingHistograms...) + a.resultMetadata = append(a.resultMetadata, a.pendingMetadata...) 
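+	// The pending samples recorded above are now committed; clear the
+	// buffers so the next append cycle starts fresh.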
+	a.pendingFloats = nil
+	a.pendingExemplars = nil
+	a.pendingHistograms = nil
+	a.pendingMetadata = nil
+	if a.next == nil {
+		return nil
+	}
+	return a.next.Commit()
+}
+
+func (a *testAppender) Rollback() error {
+	a.mtx.Lock()
+	defer a.mtx.Unlock()
+	a.rolledbackFloats = a.pendingFloats
+	a.rolledbackHistograms = a.pendingHistograms
+	a.pendingFloats = nil
+	a.pendingHistograms = nil
+	if a.next == nil {
+		return nil
+	}
+	return a.next.Rollback()
+}
+
+func (a *testAppender) String() string {
+	var sb strings.Builder
+	for _, s := range a.resultFloats {
+		sb.WriteString(fmt.Sprintf("committed: %s %f %d\n", s.metric, s.f, s.t))
+	}
+	for _, s := range a.pendingFloats {
+		sb.WriteString(fmt.Sprintf("pending: %s %f %d\n", s.metric, s.f, s.t))
+	}
+	for _, s := range a.rolledbackFloats {
+		sb.WriteString(fmt.Sprintf("rolledback: %s %f %d\n", s.metric, s.f, s.t))
+	}
+	return sb.String()
+}
diff --git a/pkg/promotel/receiver.go b/pkg/promotel/receiver.go
new file mode 100644
index 0000000000..56e3efacd1
--- /dev/null
+++ b/pkg/promotel/receiver.go
@@ -0,0 +1,67 @@
+package promotel
+
+import (
+	"context"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/pdata/pmetric"
+	"go.opentelemetry.io/collector/receiver"
+	"go.uber.org/zap"
+
+	"github.com/smartcontractkit/chainlink-common/pkg/promotel/internal"
+	"github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheus/scrape"
+	"github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheusreceiver"
+)
+
+type Runnable interface {
+	Start(context.Context) error
+	Close() error
+}
+
+type MetricReceiver interface {
+	Runnable
+}
+
+type metricReceiver struct {
+	factory  receiver.Factory
+	host     component.Host
+	receiver receiver.Metrics
+}
+
+func (p *metricReceiver) Start(ctx context.Context) error {
+	return p.receiver.Start(ctx, p.host)
+}
+
+func (p *metricReceiver) Close() error {
+	return p.receiver.Shutdown(context.Background())
+}
+
+func NewMetricReceiver(config ReceiverConfig, g prometheus.Gatherer, consumerFunc consumer.ConsumeMetricsFunc, logger *zap.Logger) (Runnable, error) {
+	// Scrape from the provided gatherer; note this sets a package-level default.
+	scrape.SetDefaultGatherer(g)
+
+	factory := prometheusreceiver.NewFactory()
+	// Creates a metrics receiver with the context, settings, config, and consumer
+	rcvr, err := factory.CreateMetrics(
+		context.Background(),
+		internal.NewReceiverSettings(logger),
+		config,
+		internal.NewConsumer(consumerFunc))
+	if err != nil {
+		return nil, err
+	}
+	// Creates a no-operation host for the receiver
+	host := internal.NewNopHost()
+	return &metricReceiver{factory, host, rcvr}, nil
+}
+
+func NewDebugMetricReceiver(config ReceiverConfig, g prometheus.Gatherer, logger *zap.Logger) (MetricReceiver, error) {
+	debugExporter := internal.NewDebugExporter(logger)
+	// Wraps the debug exporter in a consumer that logs every metrics batch
+	return NewMetricReceiver(config, g, func(ctx context.Context, md pmetric.Metrics) error {
+		// Exports the metrics data via the debug exporter
+		return debugExporter.Export(md)
+	}, logger)
+}
diff --git a/pkg/promotel/receiver_test.go b/pkg/promotel/receiver_test.go
new file mode 100644
index 0000000000..8acb576251
--- /dev/null
+++ b/pkg/promotel/receiver_test.go
@@ -0,0 +1,54 @@
+package promotel_test
+
+import (
+	"context"
+	"path/filepath"
+	"testing"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/stretchr/testify/require"
+
+	"go.opentelemetry.io/collector/component/componenttest"
+	"go.opentelemetry.io/collector/consumer/consumertest"
+	"go.opentelemetry.io/collector/pdata/pmetric"
+	"go.opentelemetry.io/collector/receiver/receivertest"
+
+	"github.com/smartcontractkit/chainlink-common/pkg/promotel"
+
+	"github.com/smartcontractkit/chainlink-common/pkg/promotel/internal/prometheusreceiver"
+)
+
+// TestPrometheusReceiver verifies the initialization, startup, and shutdown
+// of the Prometheus receiver. It ensures that no errors occur when creating
+// a metrics receiver from a loaded configuration, starting it, and
+// gracefully stopping it.
+func TestPrometheusReceiver(t *testing.T) {
+	// Load the named configuration from a YAML file
+	configFile := filepath.Join("testdata", "promconfig.yaml")
+	testConfig, err := promotel.LoadTestConfig(configFile, "withOnlyScrape")
+	require.NoError(t, err)
+	// Creates a new Prometheus receiver factory
+	factory := prometheusreceiver.NewFactory()
+	// Creates a metrics receiver with the context, settings, config, and a no-op consumer
+	receiver, err := factory.CreateMetrics(context.Background(), receivertest.NewNopSettings(), testConfig, consumertest.NewNop())
+	// Verifies the receiver was created without error
+	require.NoError(t, err)
+	// Creates a no-operation host for the receiver
+	host := componenttest.NewNopHost()
+	// Starts the receiver with the provided host
+	require.NoError(t, receiver.Start(context.Background(), host))
+	// Gracefully shuts down the receiver
+	require.NoError(t, receiver.Shutdown(context.Background()))
+}
+
+func TestMetricReceiver(t *testing.T) {
+	configFile := filepath.Join("testdata", "promconfig.yaml")
+	testConfig, err := promotel.LoadTestConfig(configFile, "withOnlyScrape")
+	require.NoError(t, err)
+	noopConsumerFunc := func(context.Context, pmetric.Metrics) error { return nil }
+	receiver, err := promotel.NewMetricReceiver(testConfig, prometheus.DefaultGatherer, noopConsumerFunc, nil)
+	require.NoError(t, err)
+	require.NoError(t, receiver.Start(context.Background()))
+	require.NoError(t, receiver.Close())
+}
diff --git a/pkg/promotel/testdata/exporter-config.yaml b/pkg/promotel/testdata/exporter-config.yaml
new file mode 100644
index 0000000000..d26631053e
--- /dev/null
+++ b/pkg/promotel/testdata/exporter-config.yaml
@@ -0,0 +1,32 @@
+endpoint: "1.2.3.4:1234"
+compression: "gzip"
+tls:
+  ca_file: /var/lib/mycert.pem
+timeout: 10s
+sending_queue:
+  enabled: true
+  num_consumers: 2
+  queue_size: 10
+retry_on_failure:
+  enabled: true
+  initial_interval: 10s
+  randomization_factor: 0.7
+  multiplier: 1.3
+  max_interval: 60s
+  max_elapsed_time: 10m
+batcher:
+  enabled: true
+  flush_timeout: 200ms
+  min_size_items: 1000
+  max_size_items: 10000
+auth:
+  authenticator: nop
+headers:
+  "can you have a . here?": "F0000000-0000-0000-0000-000000000000"
+  header1: "234"
+  another: "somevalue"
+keepalive:
+  time: 20s
+  timeout: 30s
+  permit_without_stream: true
+balancer_name: "round_robin"
diff --git a/pkg/promotel/testdata/promconfig.yaml b/pkg/promotel/testdata/promconfig.yaml
new file mode 100644
index 0000000000..9c708ed58d
--- /dev/null
+++ b/pkg/promotel/testdata/promconfig.yaml
@@ -0,0 +1,22 @@
+prometheus:
+  target_allocator:
+    endpoint: http://localhost:8080
+    timeout: 5s
+    tls:
+      cert_file: "client.crt"
+    interval: 30s
+    collector_id: collector-1
+prometheus/withScrape:
+  target_allocator:
+    endpoint: http://localhost:8080
+    interval: 30s
+    collector_id: collector-1
+  config:
+    scrape_configs:
+      - job_name: 'demo'
+        scrape_interval: 5s
+prometheus/withOnlyScrape:
+  config:
+    scrape_configs:
+      - job_name: 'demo'
+        scrape_interval: 5s