diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index dd1060749a8..d5f73ba762d 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -64,6 +64,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). # Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3.28.13 + uses: github/codeql-action/upload-sarif@45775bd8235c68ba998cffa5171334d58593da47 # v3.28.15 with: sarif_file: results.sarif diff --git a/.github/workflows/test-build-deploy.yml b/.github/workflows/test-build-deploy.yml index 07e12ba178f..2edfa29a2fb 100644 --- a/.github/workflows/test-build-deploy.yml +++ b/.github/workflows/test-build-deploy.yml @@ -93,15 +93,15 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3.28.13 + uses: github/codeql-action/init@45775bd8235c68ba998cffa5171334d58593da47 # v3.28.15 with: languages: go - name: Autobuild - uses: github/codeql-action/autobuild@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3.28.13 + uses: github/codeql-action/autobuild@45775bd8235c68ba998cffa5171334d58593da47 # v3.28.15 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3.28.13 + uses: github/codeql-action/analyze@45775bd8235c68ba998cffa5171334d58593da47 # v3.28.15 build: diff --git a/CHANGELOG.md b/CHANGELOG.md index 91507d24650..5432fc045dd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,12 @@ * [FEATURE] Query Frontend: Add dynamic interval size for query splitting. This is enabled by configuring experimental flags `querier.max-shards-per-query` and/or `querier.max-fetched-data-duration-per-query`. The split interval size is dynamically increased to maintain a number of shards and total duration fetched below the configured values. #6458 * [FEATURE] Querier/Ruler: Add `query_partial_data` and `rules_partial_data` limits to allow queries/rules to be evaluated with data from a single zone, if other zones are not available. #6526 * [FEATURE] Update prometheus alertmanager version to v0.28.0 and add new integration msteamsv2, jira, and rocketchat. #6590 +* [FEATURE] Ingester/StoreGateway: Add `ResourceMonitor` module in Cortex, and add `ResourceBasedLimiter` in Ingesters and StoreGateways. #6674 * [FEATURE] Ingester: Support out-of-order native histogram ingestion. It automatically enabled when `-ingester.out-of-order-time-window > 0` and `-blocks-storage.tsdb.enable-native-histograms=true`. #6626 #6663 +* [FEATURE] Ruler: Add support for percentage-based sharding for rulers. #6680 +* [FEATURE] Ruler: Add support for group labels. #6665 +* [ENHANCEMENT] Querier: Support query parameters on the metadata API (/api/v1/metadata) to allow users to limit the metadata returned. #6681 +* [ENHANCEMENT] Ingester: Add a `cortex_ingester_active_native_histogram_series` metric to track the number of active native histogram series. #6695 * [ENHANCEMENT] Query Frontend: Add new limit `-frontend.max-query-response-size` for total query response size after decompression in query frontend. #6607 * [ENHANCEMENT] Alertmanager: Add nflog and silences maintenance metrics. #6659 * [ENHANCEMENT] Querier: limit label APIs to query only ingesters if `start` param is not been specified. 
#6618 @@ -30,6 +35,7 @@ * [BUGFIX] Ingester: Add check to avoid query 5xx when closing tsdb. #6616 * [BUGFIX] Querier: Fix panic when marshaling QueryResultRequest. #6601 * [BUGFIX] Ingester: Avoid resharding for query when restart readonly ingesters. #6642 +* [BUGFIX] Query Frontend: Fix cleanup of query frontend per-`user` metrics. #6698 ## 1.19.0 2025-02-27 diff --git a/README.md b/README.md index 0772c4ff079..515b199a295 100644 --- a/README.md +++ b/README.md @@ -66,6 +66,8 @@ Join us in shaping the future of Cortex, and let's build something amazing toget ### Talks +- Apr 2025 KubeCon talk "Cortex: Insights, Updates and Roadmap" ([video](https://youtu.be/3aUg2qxfoZU), [slides](https://static.sched.com/hosted_files/kccnceu2025/6c/Cortex%20Talk%20KubeCon%20EU%202025.pdf)) +- Apr 2025 KubeCon talk "Taming 50 Billion Time Series: Operating Global-Scale Prometheus Deployments on Kubernetes" ([video](https://youtu.be/OqLpKJwKZlk), [slides](https://static.sched.com/hosted_files/kccnceu2025/b2/kubecon%20-%2050b%20-%20final.pdf)) - Nov 2024 KubeCon talk "Cortex Intro: Multi-Tenant Scalable Prometheus" ([video](https://youtu.be/OGAEWCoM6Tw), [slides](https://static.sched.com/hosted_files/kccncna2024/0f/Cortex%20Talk%20KubeCon%20US%202024.pdf)) - Mar 2024 KubeCon talk "Cortex Intro: Multi-Tenant Scalable Prometheus" ([video](https://youtu.be/by538PPSPQ0), [slides](https://static.sched.com/hosted_files/kccnceu2024/a1/Cortex%20Talk%20KubeConEU24.pptx.pdf)) - Apr 2023 KubeCon talk "How to Run a Rock Solid Multi-Tenant Prometheus" ([video](https://youtu.be/Pl5hEoRPLJU), [slides](https://static.sched.com/hosted_files/kccnceu2023/49/Kubecon2023.pptx.pdf)) diff --git a/cmd/cortex/main.go b/cmd/cortex/main.go index a5742d0f2ed..c2702d1338a 100644 --- a/cmd/cortex/main.go +++ b/cmd/cortex/main.go @@ -18,6 +18,7 @@ import ( "github.com/prometheus/client_golang/prometheus" collectorversion "github.com/prometheus/client_golang/prometheus/collectors/version" "github.com/prometheus/common/version" + _ "go.uber.org/automaxprocs" "gopkg.in/yaml.v2" "github.com/cortexproject/cortex/pkg/cortex" diff --git a/docs/blocks-storage/store-gateway.md b/docs/blocks-storage/store-gateway.md index 31005f0eaec..58b4c08eb3b 100644 --- a/docs/blocks-storage/store-gateway.md +++ b/docs/blocks-storage/store-gateway.md @@ -349,6 +349,21 @@ store_gateway: # CLI flag: -store-gateway.disabled-tenants [disabled_tenants: <string> | default = ""] + instance_limits: + # EXPERIMENTAL: Max CPU utilization that this instance can reach before + # rejecting new query requests (across all tenants), as a fraction between + # 0 and 1. monitored_resources config must include the resource type. 0 to + # disable. + # CLI flag: -store-gateway.instance-limits.cpu-utilization + [cpu_utilization: <float> | default = 0] + + # EXPERIMENTAL: Max heap utilization that this instance can reach before + # rejecting new query requests (across all tenants), as a fraction between + # 0 and 1. monitored_resources config must include the resource type. 0 to + # disable. + # CLI flag: -store-gateway.instance-limits.heap-utilization + [heap_utilization: <float> | default = 0] + hedged_request: # If true, hedged requests are applied to object store calls. It can help # with reducing tail latency. 
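Two of the changes above interact: the blank `go.uber.org/automaxprocs` import added to `cmd/cortex/main.go` caps `GOMAXPROCS` at the container CPU quota on startup, and `runtime.GOMAXPROCS(0)` is also what the resource monitor later uses as the CPU container limit. A minimal standalone sketch of the effect (illustrative, not Cortex code):

```go
package main

import (
	"fmt"
	"runtime"

	// The blank import adjusts GOMAXPROCS from the cgroup CPU quota in
	// its init function; no other code change is needed.
	_ "go.uber.org/automaxprocs"
)

func main() {
	// Inside a container with a 4-CPU quota on a 64-core host this
	// prints 4 instead of 64; outside a container it is unchanged.
	fmt.Println("GOMAXPROCS:", runtime.GOMAXPROCS(0))
}
```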
diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index c411d592fae..252887a15e5 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -68,6 +68,12 @@ Where default_value is the value to use if the environment variable is undefined # CLI flag: -http.prefix [http_prefix: <string> | default = "/api/prom"] +# Comma-separated list of resources to monitor. Supported values are cpu and +# heap, which track metrics from github.com/prometheus/procfs and +# runtime/metrics that are close estimates. Empty string to disable. +# CLI flag: -monitored.resources +[monitored_resources: <string> | default = ""] + api: # Use GZIP compression for API responses. Some endpoints serve large YAML or # JSON blobs which can benefit from compression. @@ -3197,6 +3203,20 @@ lifecycler: [upload_compacted_blocks_enabled: <boolean> | default = true] instance_limits: + # EXPERIMENTAL: Max CPU utilization that this instance can reach before + # rejecting new query requests (across all tenants), as a fraction between + # 0 and 1. monitored_resources config must include the resource type. 0 to + # disable. + # CLI flag: -ingester.instance-limits.cpu-utilization + [cpu_utilization: <float> | default = 0] + + # EXPERIMENTAL: Max heap utilization that this instance can reach before + # rejecting new query requests (across all tenants), as a fraction between + # 0 and 1. monitored_resources config must include the resource type. 0 to + # disable. + # CLI flag: -ingester.instance-limits.heap-utilization + [heap_utilization: <float> | default = 0] + # Max ingestion rate (samples/sec) that ingester will accept. This limit is # per-ingester, not per-tenant. Additional push requests will be rejected. # Current ingestion rate is computed as exponentially weighted moving average, @@ -3635,9 +3655,10 @@ query_rejection: # The default tenant's shard size when the shuffle-sharding strategy is used by # ruler. When this setting is specified in the per-tenant overrides, a value of -# 0 disables shuffle sharding for the tenant. +# 0 disables shuffle sharding for the tenant. If the value is < 1 the shard size +# will be a percentage of the total rulers. # CLI flag: -ruler.tenant-shard-size -[ruler_tenant_shard_size: <int> | default = 0] +[ruler_tenant_shard_size: <float> | default = 0] # Maximum number of rules per rule group per-tenant. 0 to disable. # CLI flag: -ruler.max-rules-per-rule-group @@ -5856,6 +5877,21 @@ sharding_ring: # CLI flag: -store-gateway.disabled-tenants [disabled_tenants: <string> | default = ""] +instance_limits: + # EXPERIMENTAL: Max CPU utilization that this instance can reach before + # rejecting new query requests (across all tenants), as a fraction between + # 0 and 1. monitored_resources config must include the resource type. 0 to + # disable. + # CLI flag: -store-gateway.instance-limits.cpu-utilization + [cpu_utilization: <float> | default = 0] + + # EXPERIMENTAL: Max heap utilization that this instance can reach before + # rejecting new query requests (across all tenants), as a fraction between + # 0 and 1. monitored_resources config must include the resource type. 0 to + # disable. + # CLI flag: -store-gateway.instance-limits.heap-utilization + [heap_utilization: <float> | default = 0] + hedged_request: # If true, hedged requests are applied to object store calls. It can help with # reducing tail latency. 
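The `ruler_tenant_shard_size` change above introduces fractional values. To make the documented semantics concrete, here is a minimal Go sketch; the helper name `resolveShardSize` and the round-up behavior are assumptions for illustration, not the actual ruler implementation:

```go
package main

import (
	"fmt"
	"math"
)

// resolveShardSize is a hypothetical helper mirroring the documented
// semantics: 0 disables shuffle sharding, a value >= 1 is an absolute
// shard size, and a value < 1 is a percentage of the total rulers.
func resolveShardSize(shardSize float64, totalRulers int) int {
	switch {
	case shardSize <= 0:
		return 0 // shuffle sharding disabled for the tenant
	case shardSize < 1:
		// Fraction of the total rulers; rounding up is an assumption so
		// that a small fraction still yields at least one ruler.
		return int(math.Ceil(shardSize * float64(totalRulers)))
	default:
		return int(shardSize) // absolute shard size, as before
	}
}

func main() {
	fmt.Println(resolveShardSize(0.3, 50)) // 15 of 50 rulers
	fmt.Println(resolveShardSize(4, 50))   // 4 rulers
	fmt.Println(resolveShardSize(0, 50))   // 0: sharding disabled
}
```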
diff --git a/docs/configuration/v1-guarantees.md b/docs/configuration/v1-guarantees.md index bf3beed915e..0922b47859f 100644 --- a/docs/configuration/v1-guarantees.md +++ b/docs/configuration/v1-guarantees.md @@ -123,3 +123,8 @@ Currently experimental features are: - Query-frontend: dynamic query splits - `querier.max-shards-per-query` (int) CLI flag - `querier.max-fetched-data-duration-per-query` (duration) CLI flag +- Ingester/Store-Gateway: Resource-based throttling + - `-ingester.instance-limits.cpu-utilization` + - `-ingester.instance-limits.heap-utilization` + - `-store-gateway.instance-limits.cpu-utilization` + - `-store-gateway.instance-limits.heap-utilization` \ No newline at end of file diff --git a/docs/guides/protecting-cortex-from-heavy-queries.md b/docs/guides/protecting-cortex-from-heavy-queries.md new file mode 100644 index 00000000000..4f76ef18824 --- /dev/null +++ b/docs/guides/protecting-cortex-from-heavy-queries.md @@ -0,0 +1,56 @@ +--- +title: "Protecting Cortex from Heavy Queries" +linkTitle: "Protecting Cortex from Heavy Queries" +weight: 11 +slug: protecting-cortex-from-heavy-queries +--- + +PromQL is powerful, and a single query can fetch a very wide range of data and process a huge number of samples. Heavy queries can cause: + +1. CPU on any query component to be partially exhausted, increasing latency and causing incoming queries to queue up with a high chance of timing out. +2. CPU on any query component to be fully exhausted, causing GC to slow down, leading to the pod running out of memory and being killed. +3. Heap memory on any query component to be exhausted, leading to the pod running out of memory and being killed. + +It's important to protect Cortex components by setting appropriate limits and throttling configurations based on your infrastructure and the data ingested by your tenants. + +## Static limits + +There are a number of static limits that you can configure to block heavy queries from running. + +### Max outstanding requests per tenant + +See https://cortexmetrics.io/docs/configuration/configuration-file/#query_frontend_config:~:text=max_outstanding_requests_per_tenant for details. + +### Max data bytes fetched per (sharded) query + +See https://cortexmetrics.io/docs/configuration/configuration-file/#query_frontend_config:~:text=max_fetched_data_bytes_per_query for details. + +### Max series fetched per (sharded) query + +See https://cortexmetrics.io/docs/configuration/configuration-file/#query_frontend_config:~:text=max_fetched_series_per_query for details. + +### Max chunk bytes fetched per (sharded) query + +See https://cortexmetrics.io/docs/configuration/configuration-file/#query_frontend_config:~:text=max_fetched_chunk_bytes_per_query for details. + +### Max samples fetched per (sharded) query + +See https://cortexmetrics.io/docs/configuration/configuration-file/#querier_config:~:text=max_samples for details. + +## Resource-based throttling (Experimental) + +Although the static limits can protect Cortex components from specific query patterns, they are not generic enough to cover every combination of bad query patterns. For example, what if a query fetches postings, series and chunks that are each slightly below the individual limits? For a more generic solution, you can enable resource-based throttling by setting CPU and heap utilization thresholds. + +Currently, it only throttles incoming query requests with error code 429 (too many requests) when the resource usage breaches the configured thresholds. 
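The admission check itself is simple; the sketch below illustrates it. The `resourceBasedLimiter` type, its field names, and the stubbed utilization readings are illustrative assumptions rather than the actual Cortex implementation:

```go
package main

import (
	"fmt"
	"net/http"
)

// Resource mirrors the "cpu" and "heap" values accepted by the
// -monitored.resources flag.
type Resource string

const (
	CPU  Resource = "cpu"
	Heap Resource = "heap"
)

// resourceBasedLimiter is a hypothetical illustration: it compares the
// current utilization of each monitored resource (a fraction of the
// container limit, between 0 and 1) against its configured threshold.
type resourceBasedLimiter struct {
	limits      map[Resource]float64   // e.g. {"cpu": 0.8, "heap": 0.8}
	utilization func(Resource) float64 // sampled by the resource monitor
}

// acceptNewRequest returns an error when any monitored resource is above
// its threshold; the caller turns that error into an HTTP 429 response.
func (l *resourceBasedLimiter) acceptNewRequest() error {
	for res, limit := range l.limits {
		if u := l.utilization(res); u >= limit {
			return fmt.Errorf("resource limit reached: %s utilization %.2f >= limit %.2f (would return HTTP %d)",
				res, u, limit, http.StatusTooManyRequests)
		}
	}
	return nil
}

func main() {
	l := &resourceBasedLimiter{
		limits: map[Resource]float64{CPU: 0.8, Heap: 0.8},
		// Stubbed readings; a real monitor samples procfs and
		// runtime/metrics as described above.
		utilization: func(r Resource) float64 {
			return map[Resource]float64{CPU: 0.95, Heap: 0.40}[r]
		},
	}
	if err := l.acceptNewRequest(); err != nil {
		fmt.Println("throttling query:", err)
	}
}
```

In a real deployment the utilization readings come from the resource monitor, configured as shown next.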
+ + For example, the following configuration will start throttling query requests if either CPU or heap utilization is above 80%, leaving 20% of headroom for inflight requests. + +``` +target: ingester +monitored_resources: cpu,heap +instance_limits: + cpu_utilization: 0.8 + heap_utilization: 0.8 +``` + +See https://cortexmetrics.io/docs/configuration/configuration-file/#:~:text=instance_limits for details. \ No newline at end of file diff --git a/docs/proposals/parquet-storage.md b/docs/proposals/parquet-storage.md new file mode 100644 index 00000000000..154207be133 --- /dev/null +++ b/docs/proposals/parquet-storage.md @@ -0,0 +1,139 @@ +--- +title: "Parquet-based Storage" +linkTitle: "Parquet-based Storage" +weight: 1 +slug: parquet-storage +--- + +- Author: [Alan Protasio](https://github.com/alanprot), [Ben Ye](https://github.com/yeya24) +- Date: April 2025 +- Status: Proposed + +## Background + +Since the introduction of Block Storage in Cortex, the TSDB format and the Store Gateway have been the de facto way to query long-term data on object storage. However, they present several significant challenges: + +### TSDB Format Limitations + +The TSDB format, while efficient for write-heavy workloads on local SSDs, is not designed for object storage: +- The index relies heavily on random reads to serve queries, and each random read becomes a request to the object store +- To reduce the number of object store requests, reads need to be merged, leading to higher overfetching +- The index relies on postings, which can be a huge bottleneck for high-cardinality data + +### Store Gateway Operational Challenges + +The Store Gateway was originally introduced in [Thanos](https://thanos.io/). The Cortex and Thanos communities have collaborated on many Store Gateway optimizations. However, it has design problems of its own. + +1. Resource Intensive + - Requires significant local disk space to store index headers + - High memory utilization due to index header mmap + - Often needs over-provisioning to handle query spikes + +2. State Management and Scaling Difficulties + - Requires complex data sharding when scaling, often causing issues such as consistency check failures, and is hard for users to configure + - Initial sync causes long startup time. This affects service availability in both scaling and failure recovery scenarios + +3. Query Inefficiencies + - Attempts to minimize storage requests often lead to overfetching, causing high bandwidth usage + - Complex caching logic with varying effectiveness. Latency varies a lot on cache misses + - Processes a single block with one goroutine, leading to high latency for large blocks; this cannot scale without complex data partitioning + +### Why Parquet? + +[Apache Parquet](https://parquet.apache.org/) is a columnar storage format designed specifically for efficient data storage and retrieval from object storage systems. 
It offers several key advantages that directly address the problems we face with TSDB and the Store Gateway: + +- Data is organized by columns rather than rows, reducing the number of requests to object storage since only limited IO is required to fetch a whole column +- Rich file metadata and indexes mean no local state such as an index header is required to query the data, making queriers stateless +- Advanced compression techniques reduce storage costs and improve query performance +- Friendly to parallel processing via Parquet row groups + +The Parquet format has other benefits, but they are not directly related to this proposal: + +- Wide ecosystem and tooling support +- Column pruning opportunity using projection pushdown + +## Out of Scope + +- Allow Ingester and Compactor to create Parquet files instead of TSDB blocks directly. This could be on the future roadmap, but this proposal focuses only on converting and querying Parquet files. + +## Proposed Design + +### Components + +There are 2 new Cortex components/modules introduced in this design. + +#### 1. Parquet Converter + +Parquet Converter is a new component that converts TSDB blocks on the object store to the Parquet file format. + +It is similar to the Compactor; however, it converts only a single block at a time. The converted Parquet files are stored in the same TSDB block folder so that the lifecycle of the Parquet files is managed together with the block. + +Conversion can be restricted to certain blocks, for example based on block duration: only convert blocks whose duration is >= 12h. + +#### 2. Parquet Queryable + +Similar to the existing `distributorQueryable` and `blockStorageQueryable`, Parquet queryable is a queryable implementation which allows Cortex to query parquet files and can be used in both the Cortex Querier and Ruler. + +If Parquet queryable is enabled, block storage queryable will be disabled and the Cortex querier will not query the Store Gateway anymore. `distributorQueryable` remains unchanged, so it still queries Ingesters. + +Parquet queryable uses the bucket index to discover parquet files in object storage. The bucket index is the same as the existing TSDB bucket index file, but uses a different name, `bucket-index-parquet.json.gz`. It is updated periodically by the Cortex Compactor/Parquet Converter if parquet storage is enabled. + +The Cortex querier remains a stateless component when Parquet queryable is enabled. + +### Architecture + +``` +┌──────────┐ ┌─────────────┐ ┌──────────────┐ +│ Ingester │───>│ TSDB │───>│ Parquet │ +└──────────┘ │ Blocks │ │ Converter │ + └─────────────┘ └──────────────┘ + │ + v +┌──────────┐ ┌─────────────┐ ┌──────────────┐ +│ Query │───>│ Parquet │───>│ Parquet │ +│ Frontend │ │ Querier │ │ Files │ +└──────────┘ └─────────────┘ └──────────────┘ +``` + +### Data Format + +A Parquet file is converted from a TSDB block, so it follows the same time range constraints. + +If the largest block covers 1 day, a parquet file can cover up to 1 day. The max block range is configurable in Cortex, with a default of 24h, so the following schema uses 24h as an example. + +#### Schema Overview + +The Parquet format consists of two types of files: + +1. **Labels Parquet File** + - Each row represents a unique time series + - Each column corresponds to a label name (e.g., `__name__`, `label1`, ..., `labelN`) + - Row groups are sorted by `__name__` alphabetically in ascending order + +2. **Chunks Parquet File** + - Maintains row and row group order matching the Labels file + - Contains multiple chunk columns for time-series data ordered by time. 
With 3 chunk columns, for example, each column covers 8h of chunks: 0-8h, 8-16h, 16-24h. A single TSDB chunk may span the time ranges of 2 columns, in which case the Parquet file writer needs to split and re-encode the chunk for each chunk column. + +#### Column Specifications + +| Column Name | Description | Type | Encoding/Compression/skipPageBounds | Required | +|------------|-------------|------|-----------------------------------|-----------| +| `s_hash` | Hash of all labels | INT64 | None/Zstd/Yes | No | +| `s_col_indexes` | Bitmap indicating which columns store the label set for this row (series) | ByteArray (bitmap) | DeltaByteArray/Zstd/Yes | Yes | +| `s_lbl_{labelName}` | Values for a given label name. Rows are sorted by metric name | ByteArray (string) | RLE_DICTIONARY/Zstd/No | Yes | +| `s_data_{n}` | Chunk columns (0 to data_cols_count). Each column contains data from `[n*duration, (n+1)*duration]` where duration is `24h/data_cols_count` | ByteArray (encoded chunks) | DeltaByteArray/Zstd/Yes | Yes | + +data_cols_count is stored as parquet file metadata; it defaults to 3 but is configurable to suit different use cases. + +## Open Questions + +1. Should we use a Parquet Gateway to replace the Store Gateway? + - It would separate the query engine from storage + - We could make the Parquet Gateway semi-stateful (e.g. data locality) for better performance + +## Acknowledgement + +We'd like to give huge credit to the people from the Thanos community who started this initiative. + +- [Filip Petkovski](https://github.com/fpetkovski) and his initial [talk about Parquet](https://www.youtube.com/watch?v=V8Y4VuUwg8I) +- [Michael Hoffmann](https://github.com/MichaHoffmann) and his great work on the [parquet poc](https://github.com/cloudflare/parquet-tsdb-poc) diff --git a/go.mod b/go.mod index ad39e448ad6..6f3e8901ec9 100644 --- a/go.mod +++ b/go.mod @@ -66,7 +66,7 @@ require ( go.opentelemetry.io/otel/sdk v1.35.0 go.opentelemetry.io/otel/trace v1.35.0 go.uber.org/atomic v1.11.0 - golang.org/x/net v0.37.0 + golang.org/x/net v0.38.0 golang.org/x/sync v0.12.0 golang.org/x/time v0.9.0 google.golang.org/grpc v1.70.0 @@ -81,9 +81,11 @@ require ( github.com/google/go-cmp v0.7.0 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 + github.com/prometheus/procfs v0.15.1 github.com/sercand/kuberesolver/v5 v5.1.1 github.com/tjhop/slog-gokit v0.1.3 go.opentelemetry.io/collector/pdata v1.24.0 + go.uber.org/automaxprocs v1.6.0 google.golang.org/protobuf v1.36.4 ) @@ -148,7 +150,7 @@ require ( github.com/goccy/go-json v0.10.3 // indirect github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/gogo/googleapis v1.4.0 // indirect - github.com/golang-jwt/jwt/v5 v5.2.1 // indirect + github.com/golang-jwt/jwt/v5 v5.2.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/btree v1.1.2 // indirect github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect @@ -199,7 +201,6 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus-community/prom-label-proxy v0.8.1-0.20240127162815-c1195f9aabc0 // indirect github.com/prometheus/exporter-toolkit v0.13.2 // indirect - github.com/prometheus/procfs v0.15.1 // indirect github.com/prometheus/sigv4 v0.1.1 // indirect github.com/redis/rueidis v1.0.45-alpha.1 // indirect github.com/rs/cors v1.11.1 // indirect diff --git a/go.sum b/go.sum index f8f99249b1d..e8d52fe8ad6 100644 --- a/go.sum +++ b/go.sum @@ 
-1109,8 +1109,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg= github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU= -github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= -github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= +github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang-migrate/migrate/v4 v4.18.1 h1:JML/k+t4tpHCpQTCAD62Nu43NUFzHY4CV3uAuvHGC+Y= github.com/golang-migrate/migrate/v4 v4.18.1/go.mod h1:HAX6m3sQgcdO81tdjn5exv20+3Kb13cmGli1hrD6hks= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= @@ -1560,6 +1560,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus-community/prom-label-proxy v0.8.1-0.20240127162815-c1195f9aabc0 h1:owfYHh79h8Y5HvNMGyww+DaVwo10CKiRW1RQrrZzIwg= github.com/prometheus-community/prom-label-proxy v0.8.1-0.20240127162815-c1195f9aabc0/go.mod h1:rT989D4UtOcfd9tVqIZRVIM8rkg+9XbreBjFNEKXvVI= github.com/prometheus/alertmanager v0.28.1 h1:BK5pCoAtaKg01BYRUJhEDV1tqJMEtYBGzPw8QdvnnvA= @@ -1811,6 +1813,8 @@ go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -1985,8 +1989,8 @@ golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= -golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= diff --git a/integration/e2ecortex/client.go b/integration/e2ecortex/client.go index bd0568eb9f8..9067b60c078 100644 --- a/integration/e2ecortex/client.go +++ b/integration/e2ecortex/client.go @@ -115,9 +115,9 @@ func NewPromQueryClient(address string) (*Client, error) { } // Push the input timeseries to the remote endpoint -func (c *Client) Push(timeseries []prompb.TimeSeries) (*http.Response, error) { +func (c *Client) Push(timeseries []prompb.TimeSeries, metadata ...prompb.MetricMetadata) (*http.Response, error) { // Create write request - data, err := proto.Marshal(&prompb.WriteRequest{Timeseries: timeseries}) + data, err := proto.Marshal(&prompb.WriteRequest{Timeseries: timeseries, Metadata: metadata}) if err != nil { return nil, err } diff --git a/integration/ingester_metadata_test.go b/integration/ingester_metadata_test.go new file mode 100644 index 00000000000..0d0893bf665 --- /dev/null +++ b/integration/ingester_metadata_test.go @@ -0,0 +1,200 @@ +//go:build requires_docker +// +build requires_docker + +package integration + +import ( + "fmt" + "strings" + "testing" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/prompb" + "github.com/stretchr/testify/require" + + "github.com/cortexproject/cortex/integration/e2e" + e2edb "github.com/cortexproject/cortex/integration/e2e/db" + "github.com/cortexproject/cortex/integration/e2ecortex" +) + +func TestIngesterMetadata(t *testing.T) { + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. + consul := e2edb.NewConsul() + require.NoError(t, s.StartAndWaitReady(consul)) + + baseFlags := mergeFlags(AlertmanagerLocalFlags(), BlocksStorageFlags()) + + minio := e2edb.NewMinio(9000, baseFlags["-blocks-storage.s3.bucket-name"]) + require.NoError(t, s.StartAndWaitReady(minio)) + + flags := mergeFlags(baseFlags, map[string]string{ + // alert manager + "-alertmanager.web.external-url": "http://localhost/alertmanager", + // consul + "-ring.store": "consul", + "-consul.hostname": consul.NetworkHTTPEndpoint(), + }) + + // Start Cortex components + distributor := e2ecortex.NewDistributor("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + ingester := e2ecortex.NewIngester("ingester", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + querier := e2ecortex.NewQuerier("querier", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + require.NoError(t, s.StartAndWaitReady(distributor, ingester, querier)) + + // Wait until distributor has updated the ring. + require.NoError(t, distributor.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"cortex_ring_members"}, e2e.WithLabelMatchers( + labels.MustNewMatcher(labels.MatchEqual, "name", "ingester"), + labels.MustNewMatcher(labels.MatchEqual, "state", "ACTIVE")))) + + // Wait until querier has updated the ring. 
+ require.NoError(t, querier.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"cortex_ring_members"}, e2e.WithLabelMatchers( + labels.MustNewMatcher(labels.MatchEqual, "name", "ingester"), + labels.MustNewMatcher(labels.MatchEqual, "state", "ACTIVE")))) + + client, err := e2ecortex.NewClient(distributor.HTTPEndpoint(), querier.HTTPEndpoint(), "", "", userID) + require.NoError(t, err) + + metadataMetricNum := 5 + metadataPerMetrics := 2 + metadata := make([]prompb.MetricMetadata, 0, metadataMetricNum) + for i := 0; i < metadataMetricNum; i++ { + for j := 0; j < metadataPerMetrics; j++ { + metadata = append(metadata, prompb.MetricMetadata{ + MetricFamilyName: fmt.Sprintf("metadata_name_%d", i), + Help: fmt.Sprintf("metadata_help_%d_%d", i, j), + Unit: fmt.Sprintf("metadata_unit_%d_%d", i, j), + }) + } + } + res, err := client.Push(nil, metadata...) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + + testMetadataQueryParams(t, client, metadataMetricNum, metadataPerMetrics) +} + +func TestIngesterMetadataWithTenantFederation(t *testing.T) { + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. + consul := e2edb.NewConsul() + require.NoError(t, s.StartAndWaitReady(consul)) + + baseFlags := mergeFlags(AlertmanagerLocalFlags(), BlocksStorageFlags()) + + minio := e2edb.NewMinio(9000, baseFlags["-blocks-storage.s3.bucket-name"]) + require.NoError(t, s.StartAndWaitReady(minio)) + + flags := mergeFlags(baseFlags, map[string]string{ + // tenant federation + "-tenant-federation.enabled": "true", + // alert manager + "-alertmanager.web.external-url": "http://localhost/alertmanager", + // consul + "-ring.store": "consul", + "-consul.hostname": consul.NetworkHTTPEndpoint(), + }) + + // Start Cortex components + distributor := e2ecortex.NewDistributor("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + ingester := e2ecortex.NewIngester("ingester", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + querier := e2ecortex.NewQuerier("querier", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + require.NoError(t, s.StartAndWaitReady(distributor, ingester, querier)) + + // Wait until distributor has updated the ring. + require.NoError(t, distributor.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"cortex_ring_members"}, e2e.WithLabelMatchers( + labels.MustNewMatcher(labels.MatchEqual, "name", "ingester"), + labels.MustNewMatcher(labels.MatchEqual, "state", "ACTIVE")))) + + // Wait until querier has updated the ring. 
+ require.NoError(t, querier.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"cortex_ring_members"}, e2e.WithLabelMatchers( + labels.MustNewMatcher(labels.MatchEqual, "name", "ingester"), + labels.MustNewMatcher(labels.MatchEqual, "state", "ACTIVE")))) + + metadataMetricNum := 5 + metadataPerMetrics := 2 + metadata := make([]prompb.MetricMetadata, 0, metadataMetricNum) + for i := 0; i < metadataMetricNum; i++ { + for j := 0; j < metadataPerMetrics; j++ { + metadata = append(metadata, prompb.MetricMetadata{ + MetricFamilyName: fmt.Sprintf("metadata_name_%d", i), + Help: fmt.Sprintf("metadata_help_%d_%d", i, j), + Unit: fmt.Sprintf("metadata_unit_%d_%d", i, j), + }) + } + } + + numUsers := 2 + tenantIDs := make([]string, numUsers) + for u := 0; u < numUsers; u++ { + tenantIDs[u] = fmt.Sprintf("user-%d", u) + c, err := e2ecortex.NewClient(distributor.HTTPEndpoint(), querier.HTTPEndpoint(), "", "", tenantIDs[u]) + require.NoError(t, err) + + res, err := c.Push(nil, metadata...) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + } + + client, err := e2ecortex.NewClient(distributor.HTTPEndpoint(), querier.HTTPEndpoint(), "", "", strings.Join(tenantIDs, "|")) + require.NoError(t, err) + + testMetadataQueryParams(t, client, metadataMetricNum, metadataPerMetrics) +} + +func testMetadataQueryParams(t *testing.T, client *e2ecortex.Client, metadataMetricNum, metadataPerMetrics int) { + t.Run("test no parameter", func(t *testing.T) { + result, err := client.Metadata("", "") + require.NoError(t, err) + require.Equal(t, metadataMetricNum, len(result)) + + for _, v := range result { + require.Equal(t, metadataPerMetrics, len(v)) + } + }) + + t.Run("test name parameter", func(t *testing.T) { + t.Run("existing name", func(t *testing.T) { + name := "metadata_name_0" + result, err := client.Metadata(name, "") + require.NoError(t, err) + m, ok := result[name] + require.True(t, ok) + require.Equal(t, metadataPerMetrics, len(m)) + }) + t.Run("existing name with limit 0", func(t *testing.T) { + name := "metadata_name_0" + result, err := client.Metadata(name, "0") + require.NoError(t, err) + require.Equal(t, 0, len(result)) + }) + t.Run("non-existing name", func(t *testing.T) { + result, err := client.Metadata("dummy", "") + require.NoError(t, err) + require.Equal(t, 0, len(result)) + }) + }) + + t.Run("test limit parameter", func(t *testing.T) { + t.Run("less than length of metadata", func(t *testing.T) { + result, err := client.Metadata("", "3") + require.NoError(t, err) + require.Equal(t, 3, len(result)) + }) + t.Run("limit: 0", func(t *testing.T) { + result, err := client.Metadata("", "0") + require.NoError(t, err) + require.Equal(t, 0, len(result)) + }) + t.Run("invalid limit", func(t *testing.T) { + _, err := client.Metadata("", "dummy") + require.Error(t, err) + }) + }) +} diff --git a/integration/otlp_test.go b/integration/otlp_test.go index df5b632b8aa..7eda34e55ec 100644 --- a/integration/otlp_test.go +++ b/integration/otlp_test.go @@ -88,7 +88,7 @@ func TestOTLP(t *testing.T) { require.NoError(t, err) require.Equal(t, []string{"__name__", "foo"}, labelNames) - metadataResult, err := c.Metadata("series_1", "") + metadataResult, err := c.Metadata("series_1_total", "") require.NoError(t, err) require.Equal(t, 1, len(metadataResult)) diff --git a/integration/query_fuzz_test.go b/integration/query_fuzz_test.go index 470859b72e3..e92ac9fbdb6 100644 --- a/integration/query_fuzz_test.go +++ b/integration/query_fuzz_test.go @@ -967,7 +967,7 @@ var comparer = cmp.Comparer(func(x, y model.Value) 
bool { } compareHistograms := func(l, r *model.SampleHistogram) bool { - return l == r || (l.Count == r.Count && compareFloats(float64(l.Sum), float64(r.Sum)) && compareHistogramBuckets(l.Buckets, r.Buckets)) + return l == r || (compareFloats(float64(l.Count), float64(r.Count)) && compareFloats(float64(l.Sum), float64(r.Sum)) && compareHistogramBuckets(l.Buckets, r.Buckets)) } // count_values returns a metrics with one label {"value": "1.012321"} diff --git a/integration/ruler_test.go b/integration/ruler_test.go index 71015a54b53..a08a8bbd4e3 100644 --- a/integration/ruler_test.go +++ b/integration/ruler_test.go @@ -311,6 +311,16 @@ func testRulerAPIWithSharding(t *testing.T, enableRulesBackup bool) { expectedNames := make([]string, numRulesGroups) alertCount := 0 evalInterval, _ := model.ParseDuration("1s") + groupLabels := map[string]string{ + "group_label_1": "val1", + "group_label_2": "val2", + "duplicate_label": "group_val", + } + ruleLabels := map[string]string{ + "rule_label_1": "val1", + "rule_label_2": "val2", + "duplicate_label": "rule_val", + } for i := 0; i < numRulesGroups; i++ { num := random.Intn(100) var ruleNode yaml.Node @@ -319,7 +329,6 @@ func testRulerAPIWithSharding(t *testing.T, enableRulesBackup bool) { ruleNode.SetString(fmt.Sprintf("rule_%d", i)) exprNode.SetString(strconv.Itoa(i)) ruleName := fmt.Sprintf("test_%d", i) - expectedNames[i] = ruleName if num%2 == 0 { alertCount++ @@ -327,9 +336,11 @@ func testRulerAPIWithSharding(t *testing.T, enableRulesBackup bool) { Name: ruleName, Interval: evalInterval, Rules: []rulefmt.RuleNode{{ - Alert: ruleNode, - Expr: exprNode, + Alert: ruleNode, + Expr: exprNode, + Labels: ruleLabels, }}, + Labels: groupLabels, } } else { ruleGroups[i] = rulefmt.RuleGroup{ @@ -337,8 +348,9 @@ func testRulerAPIWithSharding(t *testing.T, enableRulesBackup bool) { Interval: evalInterval, Rules: []rulefmt.RuleNode{{ Record: ruleNode, - Expr: exprNode, + Labels: ruleLabels, }}, + Labels: groupLabels, } } } @@ -487,6 +499,32 @@ func testRulerAPIWithSharding(t *testing.T, enableRulesBackup bool) { assert.Greater(t, alertsCount, 0, "Expected greater than 0 alerts but got %d", alertsCount) }, }, + "Filter Rules and verify Group Labels exist": { + filter: e2ecortex.RuleFilter{ + RuleType: "alert", + }, + resultCheckFn: func(t assert.TestingT, ruleGroups []*ruler.RuleGroup) { + for _, ruleGroup := range ruleGroups { + rule := ruleGroup.Rules[0].(map[string]interface{}) + ruleType := rule["type"] + assert.Equal(t, "alerting", ruleType, "Expected 'alerting' rule type but got %s", ruleType) + responseJson, err := json.Marshal(rule) + assert.NoError(t, err) + ar := &alertingRule{} + assert.NoError(t, json.Unmarshal(responseJson, ar)) + if !ar.LastEvaluation.IsZero() { + // Labels will be merged only if groups are loaded to Prometheus rule manager + assert.Equal(t, 5, len(ar.Labels)) + } + for _, label := range ar.Labels { + if label.Name == "duplicate_label" { + // rule label should override group label + assert.Equal(t, ruleLabels["duplicate_label"], label.Value) + } + } + } + }, + }, } // For each test case, fetch the rules with configured filters, and ensure the results match. 
if enableRulesBackup { diff --git a/pkg/configs/instance_limits.go b/pkg/configs/instance_limits.go new file mode 100644 index 00000000000..8273400847c --- /dev/null +++ b/pkg/configs/instance_limits.go @@ -0,0 +1,40 @@ +package configs + +import ( + "errors" + "flag" + "strings" + + "github.com/cortexproject/cortex/pkg/util/flagext" + "github.com/cortexproject/cortex/pkg/util/resource" +) + +type InstanceLimits struct { + CPUUtilization float64 `yaml:"cpu_utilization"` + HeapUtilization float64 `yaml:"heap_utilization"` +} + +func (cfg *InstanceLimits) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { + f.Float64Var(&cfg.CPUUtilization, prefix+"instance-limits.cpu-utilization", 0, "EXPERIMENTAL: Max CPU utilization that this instance can reach before rejecting new query requests (across all tenants), as a fraction between 0 and 1. monitored_resources config must include the resource type. 0 to disable.") + f.Float64Var(&cfg.HeapUtilization, prefix+"instance-limits.heap-utilization", 0, "EXPERIMENTAL: Max heap utilization that this instance can reach before rejecting new query requests (across all tenants), as a fraction between 0 and 1. monitored_resources config must include the resource type. 0 to disable.") +} + +func (cfg *InstanceLimits) Validate(monitoredResources flagext.StringSliceCSV) error { + if cfg.CPUUtilization > 1 || cfg.CPUUtilization < 0 { + return errors.New("cpu_utilization must be between 0 and 1") + } + + if cfg.CPUUtilization > 0 && !strings.Contains(monitoredResources.String(), string(resource.CPU)) { + return errors.New("monitored_resources config must include \"cpu\" as well") + } + + if cfg.HeapUtilization > 1 || cfg.HeapUtilization < 0 { + return errors.New("heap_utilization must be between 0 and 1") + } + + if cfg.HeapUtilization > 0 && !strings.Contains(monitoredResources.String(), string(resource.Heap)) { + return errors.New("monitored_resources config must include \"heap\" as well") + } + + return nil +} diff --git a/pkg/configs/instance_limits_test.go b/pkg/configs/instance_limits_test.go new file mode 100644 index 00000000000..a1d3686dba6 --- /dev/null +++ b/pkg/configs/instance_limits_test.go @@ -0,0 +1,64 @@ +package configs + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_Validate(t *testing.T) { + for name, tc := range map[string]struct { + instanceLimits InstanceLimits + monitoredResources []string + err error + }{ + "correct config should pass validation": { + instanceLimits: InstanceLimits{ + CPUUtilization: 0.5, + HeapUtilization: 0.5, + }, + monitoredResources: []string{"cpu", "heap"}, + err: nil, + }, + "utilization config less than 0 should fail validation": { + instanceLimits: InstanceLimits{ + CPUUtilization: -0.5, + HeapUtilization: 0.5, + }, + monitoredResources: []string{"cpu", "heap"}, + err: errors.New("cpu_utilization must be between 0 and 1"), + }, + "utilization config greater than 1 should fail validation": { + instanceLimits: InstanceLimits{ + CPUUtilization: 0.5, + HeapUtilization: 1.5, + }, + monitoredResources: []string{"cpu", "heap"}, + err: errors.New("heap_utilization must be between 0 and 1"), + }, + "missing cpu in monitored_resources config should fail validation": { + instanceLimits: InstanceLimits{ + CPUUtilization: 0.5, + }, + monitoredResources: []string{"heap"}, + err: errors.New("monitored_resources config must include \"cpu\" as well"), + }, + "missing heap in monitored_resources config should fail validation": { + instanceLimits: InstanceLimits{ + HeapUtilization: 
0.5, + }, + monitoredResources: []string{"cpu"}, + err: errors.New("monitored_resources config must include \"heap\" as well"), + }, + } { + t.Run(name, func(t *testing.T) { + err := tc.instanceLimits.Validate(tc.monitoredResources) + if tc.err != nil { + require.EqualError(t, err, tc.err.Error()) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/pkg/cortex/cortex.go b/pkg/cortex/cortex.go index 00b67879f06..a782ed4d7b8 100644 --- a/pkg/cortex/cortex.go +++ b/pkg/cortex/cortex.go @@ -22,6 +22,7 @@ import ( "gopkg.in/yaml.v2" "github.com/cortexproject/cortex/pkg/util/grpcclient" + "github.com/cortexproject/cortex/pkg/util/resource" "github.com/cortexproject/cortex/pkg/alertmanager" "github.com/cortexproject/cortex/pkg/alertmanager/alertstore" @@ -88,10 +89,11 @@ var ( // Config is the root config for Cortex. type Config struct { - Target flagext.StringSliceCSV `yaml:"target"` - AuthEnabled bool `yaml:"auth_enabled"` - PrintConfig bool `yaml:"-"` - HTTPPrefix string `yaml:"http_prefix"` + Target flagext.StringSliceCSV `yaml:"target"` + AuthEnabled bool `yaml:"auth_enabled"` + PrintConfig bool `yaml:"-"` + HTTPPrefix string `yaml:"http_prefix"` + MonitoredResources flagext.StringSliceCSV `yaml:"monitored_resources"` ExternalQueryable prom_storage.Queryable `yaml:"-"` ExternalPusher ruler.Pusher `yaml:"-"` @@ -143,6 +145,11 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&c.PrintConfig, "print.config", false, "Print the config and exit.") f.StringVar(&c.HTTPPrefix, "http.prefix", "/api/prom", "HTTP path prefix for Cortex API.") + c.MonitoredResources = []string{} + f.Var(&c.MonitoredResources, "monitored.resources", "Comma-separated list of resources to monitor. "+ "Supported values are cpu and heap, which track metrics from github.com/prometheus/procfs and runtime/metrics "+ "that are close estimates. 
Empty string to disable.") + c.API.RegisterFlags(f) c.registerServerFlagsWithChangedDefaultValues(f) c.Distributor.RegisterFlags(f) @@ -216,7 +223,7 @@ func (c *Config) Validate(log log.Logger) error { if err := c.QueryRange.Validate(c.Querier); err != nil { return errors.Wrap(err, "invalid query_range config") } - if err := c.StoreGateway.Validate(c.LimitsConfig); err != nil { + if err := c.StoreGateway.Validate(c.LimitsConfig, c.MonitoredResources); err != nil { return errors.Wrap(err, "invalid store-gateway config") } if err := c.Compactor.Validate(c.LimitsConfig); err != nil { @@ -229,7 +236,7 @@ func (c *Config) Validate(log log.Logger) error { return errors.Wrap(err, "invalid alertmanager config") } - if err := c.Ingester.Validate(); err != nil { + if err := c.Ingester.Validate(c.MonitoredResources); err != nil { return errors.Wrap(err, "invalid ingester config") } @@ -237,6 +244,16 @@ func (c *Config) Validate(log log.Logger) error { return errors.Wrap(err, "invalid tracing config") } + for _, r := range c.MonitoredResources { + switch resource.Type(r) { + case resource.CPU, resource.Heap: + default: + if len(r) > 0 { + return fmt.Errorf("unsupported resource type to monitor: %s", r) + } + } + } + return nil } @@ -315,6 +332,7 @@ type Cortex struct { MetadataQuerier querier.MetadataQuerier QuerierEngine promql.QueryEngine QueryFrontendTripperware tripperware.Tripperware + ResourceMonitor *resource.Monitor Ruler *ruler.Ruler RulerStorage rulestore.RuleStore diff --git a/pkg/cortex/cortex_test.go b/pkg/cortex/cortex_test.go index bac7c0021c6..74bf0750a33 100644 --- a/pkg/cortex/cortex_test.go +++ b/pkg/cortex/cortex_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "flag" + "fmt" "io" "net" "os" @@ -165,11 +166,29 @@ func TestConfigValidation(t *testing.T) { }, expectedError: errInvalidHTTPPrefix, }, + { + name: "should fail validation for invalid resource to monitor", + getTestConfig: func() *Config { + configuration := newDefaultConfig() + configuration.MonitoredResources = []string{"wrong"} + return configuration + }, + expectedError: fmt.Errorf("unsupported resource type to monitor: %s", "wrong"), + }, + { + name: "should not fail validation for valid resources to monitor", + getTestConfig: func() *Config { + configuration := newDefaultConfig() + configuration.MonitoredResources = []string{"cpu", "heap"} + return configuration + }, + expectedError: nil, + }, } { t.Run(tc.name, func(t *testing.T) { err := tc.getTestConfig().Validate(nil) if tc.expectedError != nil { - require.Equal(t, tc.expectedError, err) + require.ErrorContains(t, err, tc.expectedError.Error()) } else { require.NoError(t, err) } diff --git a/pkg/cortex/modules.go b/pkg/cortex/modules.go index 091297732d7..3f6c06b79d4 100644 --- a/pkg/cortex/modules.go +++ b/pkg/cortex/modules.go @@ -6,6 +6,8 @@ import ( "fmt" "log/slog" "net/http" + "runtime" + "runtime/debug" "github.com/go-kit/log/level" "github.com/opentracing-contrib/go-stdlib/nethttp" @@ -51,6 +53,7 @@ import ( "github.com/cortexproject/cortex/pkg/util/grpcclient" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/modules" + "github.com/cortexproject/cortex/pkg/util/resource" "github.com/cortexproject/cortex/pkg/util/runtimeconfig" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/validation" @@ -86,6 +89,7 @@ const ( Purger string = "purger" QueryScheduler string = "query-scheduler" TenantFederation string = "tenant-federation" + ResourceMonitor string = 
"resource-monitor" All string = "all" ) @@ -441,7 +445,7 @@ func (t *Cortex) initIngesterService() (serv services.Service, err error) { t.Cfg.Ingester.QueryIngestersWithin = t.Cfg.Querier.QueryIngestersWithin t.tsdbIngesterConfig() - t.Ingester, err = ingester.New(t.Cfg.Ingester, t.Overrides, prometheus.DefaultRegisterer, util_log.Logger) + t.Ingester, err = ingester.New(t.Cfg.Ingester, t.Overrides, prometheus.DefaultRegisterer, util_log.Logger, t.ResourceMonitor) if err != nil { return } @@ -705,7 +709,7 @@ func (t *Cortex) initCompactor() (serv services.Service, err error) { func (t *Cortex) initStoreGateway() (serv services.Service, err error) { t.Cfg.StoreGateway.ShardingRing.ListenPort = t.Cfg.Server.GRPCListenPort - t.StoreGateway, err = storegateway.NewStoreGateway(t.Cfg.StoreGateway, t.Cfg.BlocksStorage, t.Overrides, t.Cfg.Server.LogLevel, util_log.Logger, prometheus.DefaultRegisterer) + t.StoreGateway, err = storegateway.NewStoreGateway(t.Cfg.StoreGateway, t.Cfg.BlocksStorage, t.Overrides, t.Cfg.Server.LogLevel, util_log.Logger, prometheus.DefaultRegisterer, t.ResourceMonitor) if err != nil { return nil, err } @@ -765,11 +769,36 @@ func (t *Cortex) initQueryScheduler() (services.Service, error) { return s, nil } +func (t *Cortex) initResourceMonitor() (services.Service, error) { + if len(t.Cfg.MonitoredResources) == 0 { + return nil, nil + } + + containerLimits := make(map[resource.Type]float64) + for _, res := range t.Cfg.MonitoredResources { + switch resource.Type(res) { + case resource.CPU: + containerLimits[resource.Type(res)] = float64(runtime.GOMAXPROCS(0)) + case resource.Heap: + containerLimits[resource.Type(res)] = float64(debug.SetMemoryLimit(-1)) + } + } + + var err error + t.ResourceMonitor, err = resource.NewMonitor(containerLimits, prometheus.DefaultRegisterer) + if t.ResourceMonitor != nil { + util_log.WarnExperimentalUse("resource monitor") + } + + return t.ResourceMonitor, err +} + func (t *Cortex) setupModuleManager() error { mm := modules.NewManager(util_log.Logger) // Register all modules here. 
// RegisterModule(name string, initFn func()(services.Service, error)) + mm.RegisterModule(ResourceMonitor, t.initResourceMonitor) mm.RegisterModule(Server, t.initServer, modules.UserInvisibleModule) mm.RegisterModule(API, t.initAPI, modules.UserInvisibleModule) mm.RegisterModule(RuntimeConfig, t.initRuntimeConfig, modules.UserInvisibleModule) @@ -811,7 +840,7 @@ func (t *Cortex) setupModuleManager() error { Distributor: {DistributorService, API, GrpcClientService}, DistributorService: {Ring, Overrides}, Ingester: {IngesterService, Overrides, API}, - IngesterService: {Overrides, RuntimeConfig, MemberlistKV}, + IngesterService: {Overrides, RuntimeConfig, MemberlistKV, ResourceMonitor}, Flusher: {Overrides, API}, Queryable: {Overrides, DistributorService, Overrides, Ring, API, StoreQueryable, MemberlistKV}, Querier: {TenantFederation}, @@ -824,7 +853,7 @@ func (t *Cortex) setupModuleManager() error { Configs: {API}, AlertManager: {API, MemberlistKV, Overrides}, Compactor: {API, MemberlistKV, Overrides}, - StoreGateway: {API, Overrides, MemberlistKV}, + StoreGateway: {API, Overrides, MemberlistKV, ResourceMonitor}, TenantDeletion: {API, Overrides}, Purger: {TenantDeletion}, TenantFederation: {Queryable}, diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index a6a6efb8fec..143f33b42b7 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -1466,13 +1466,12 @@ func (d *Distributor) metricsForLabelMatchersCommon(ctx context.Context, from, t } // MetricsMetadata returns all metric metadata of a user. -func (d *Distributor) MetricsMetadata(ctx context.Context) ([]scrape.MetricMetadata, error) { +func (d *Distributor) MetricsMetadata(ctx context.Context, req *ingester_client.MetricsMetadataRequest) ([]scrape.MetricMetadata, error) { replicationSet, err := d.GetIngestersForMetadata(ctx) if err != nil { return nil, err } - req := &ingester_client.MetricsMetadataRequest{} // TODO(gotjosh): We only need to look in all the ingesters if shardByAllLabels is enabled. 
resps, err := d.ForReplicationSet(ctx, replicationSet, d.cfg.ZoneResultsQuorumMetadata, false, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { return client.MetricsMetadata(ctx, req) diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 16177ad62b1..7113860fa1e 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -2774,7 +2774,7 @@ func TestDistributor_MetricsMetadata(t *testing.T) { require.NoError(t, err) // Assert on metric metadata - metadata, err := ds[0].MetricsMetadata(ctx) + metadata, err := ds[0].MetricsMetadata(ctx, &client.MetricsMetadataRequest{Limit: -1, LimitPerMetric: -1, Metric: ""}) require.NoError(t, err) assert.Equal(t, 10, len(metadata)) diff --git a/pkg/frontend/transport/handler.go b/pkg/frontend/transport/handler.go index c1c6ce05d73..7d1c5d9cb1e 100644 --- a/pkg/frontend/transport/handler.go +++ b/pkg/frontend/transport/handler.go @@ -50,6 +50,7 @@ const ( reasonRequestBodySizeExceeded = "request_body_size_exceeded" reasonResponseBodySizeExceeded = "response_body_size_exceeded" reasonTooManyRequests = "too_many_requests" + reasonResourceExhausted = "resource_exhausted" reasonTimeRangeExceeded = "time_range_exceeded" reasonTooManySamples = "too_many_samples" reasonSeriesFetched = "series_fetched" @@ -167,18 +168,7 @@ func NewHandler(cfg HandlerConfig, tenantFederationCfg tenantfederation.Config, []string{"reason", "source", "user"}, ) - h.activeUsers = util.NewActiveUsersCleanupWithDefaultValues(func(user string) { - h.querySeconds.DeleteLabelValues(user) - h.queryFetchedSeries.DeleteLabelValues(user) - h.queryFetchedSamples.DeleteLabelValues(user) - h.queryScannedSamples.DeleteLabelValues(user) - h.queryPeakSamples.DeleteLabelValues(user) - h.queryChunkBytes.DeleteLabelValues(user) - h.queryDataBytes.DeleteLabelValues(user) - if err := util.DeleteMatchingLabels(h.rejectedQueries, map[string]string{"user": user}); err != nil { - level.Warn(log).Log("msg", "failed to remove cortex_rejected_queries_total metric for user", "user", user, "err", err) - } - }) + h.activeUsers = util.NewActiveUsersCleanupWithDefaultValues(h.cleanupMetricsForInactiveUser) // If cleaner stops or fail, we will simply not clean the metrics for inactive users. 
_ = h.activeUsers.StartAsync(context.Background()) } @@ -186,6 +176,41 @@ func NewHandler(cfg HandlerConfig, tenantFederationCfg tenantfederation.Config, return h } +func (h *Handler) cleanupMetricsForInactiveUser(user string) { + if !h.cfg.QueryStatsEnabled { + return + } + + // Create a map with the user label to match + userLabel := map[string]string{"user": user} + + // Clean up all metrics for the user + if err := util.DeleteMatchingLabels(h.querySeconds, userLabel); err != nil { + level.Warn(h.log).Log("msg", "failed to remove cortex_query_seconds_total metric for user", "user", user, "err", err) + } + if err := util.DeleteMatchingLabels(h.queryFetchedSeries, userLabel); err != nil { + level.Warn(h.log).Log("msg", "failed to remove cortex_query_fetched_series_total metric for user", "user", user, "err", err) + } + if err := util.DeleteMatchingLabels(h.queryFetchedSamples, userLabel); err != nil { + level.Warn(h.log).Log("msg", "failed to remove cortex_query_samples_total metric for user", "user", user, "err", err) + } + if err := util.DeleteMatchingLabels(h.queryScannedSamples, userLabel); err != nil { + level.Warn(h.log).Log("msg", "failed to remove cortex_query_samples_scanned_total metric for user", "user", user, "err", err) + } + if err := util.DeleteMatchingLabels(h.queryPeakSamples, userLabel); err != nil { + level.Warn(h.log).Log("msg", "failed to remove cortex_query_peak_samples metric for user", "user", user, "err", err) + } + if err := util.DeleteMatchingLabels(h.queryChunkBytes, userLabel); err != nil { + level.Warn(h.log).Log("msg", "failed to remove cortex_query_fetched_chunks_bytes_total metric for user", "user", user, "err", err) + } + if err := util.DeleteMatchingLabels(h.queryDataBytes, userLabel); err != nil { + level.Warn(h.log).Log("msg", "failed to remove cortex_query_fetched_data_bytes_total metric for user", "user", user, "err", err) + } + if err := util.DeleteMatchingLabels(h.rejectedQueries, userLabel); err != nil { + level.Warn(h.log).Log("msg", "failed to remove cortex_rejected_queries_total metric for user", "user", user, "err", err) + } +} + func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { var ( stats *querier_stats.QueryStats @@ -496,7 +521,8 @@ func (f *Handler) reportQueryStats(r *http.Request, source, userID string, query reason = reasonTooManyRequests } else if statusCode == http.StatusRequestEntityTooLarge { reason = reasonResponseBodySizeExceeded - } else if statusCode == http.StatusUnprocessableEntity { + } else if statusCode == http.StatusUnprocessableEntity && error != nil { + // We are unable to use errors.As to compare since body string from the http response is wrapped as an error errMsg := error.Error() if strings.Contains(errMsg, limitTooManySamples) { reason = reasonTooManySamples @@ -516,6 +542,8 @@ func (f *Handler) reportQueryStats(r *http.Request, source, userID string, query reason = reasonChunksLimitStoreGateway } else if strings.Contains(errMsg, limitBytesStoreGateway) { reason = reasonBytesLimitStoreGateway + } else if strings.Contains(errMsg, limiter.ErrResourceLimitReachedStr) { + reason = reasonResourceExhausted } } if len(reason) > 0 { diff --git a/pkg/frontend/transport/handler_test.go b/pkg/frontend/transport/handler_test.go index a7b91ad2407..72e28742f5b 100644 --- a/pkg/frontend/transport/handler_test.go +++ b/pkg/frontend/transport/handler_test.go @@ -29,6 +29,7 @@ import ( "github.com/cortexproject/cortex/pkg/querier/tripperware" "github.com/cortexproject/cortex/pkg/tenant" util_api 
"github.com/cortexproject/cortex/pkg/util/api" + "github.com/cortexproject/cortex/pkg/util/limiter" util_log "github.com/cortexproject/cortex/pkg/util/log" ) @@ -380,6 +381,23 @@ func TestHandler_ServeHTTP(t *testing.T) { }, expectedStatusCode: http.StatusUnprocessableEntity, }, + { + name: "test handler with reasonResourceExhausted", + cfg: HandlerConfig{QueryStatsEnabled: true}, + expectedMetrics: 6, + roundTripperFunc: roundTripperFunc(func(req *http.Request) (*http.Response, error) { + resourceLimitReachedErr := &limiter.ResourceLimitReachedError{} + return &http.Response{ + StatusCode: http.StatusUnprocessableEntity, + Body: io.NopCloser(strings.NewReader(resourceLimitReachedErr.Error())), + }, nil + }), + additionalMetricsCheckFunc: func(h *Handler) { + v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonResourceExhausted, tripperware.SourceAPI, userID)) + assert.Equal(t, float64(1), v) + }, + expectedStatusCode: http.StatusUnprocessableEntity, + }, } { t.Run(tt.name, func(t *testing.T) { reg := prometheus.NewPedanticRegistry() @@ -637,3 +655,109 @@ func Test_TenantFederation_MaxTenant(t *testing.T) { }) } } + +func TestHandlerMetricsCleanup(t *testing.T) { + reg := prometheus.NewPedanticRegistry() + handler := NewHandler(HandlerConfig{QueryStatsEnabled: true}, tenantfederation.Config{}, http.DefaultTransport, log.NewNopLogger(), reg) + + user1 := "user1" + user2 := "user2" + source := "api" + + // Simulate activity for user1 + handler.querySeconds.WithLabelValues(source, user1).Add(1.0) + handler.queryFetchedSeries.WithLabelValues(source, user1).Add(100) + handler.queryFetchedSamples.WithLabelValues(source, user1).Add(1000) + handler.queryScannedSamples.WithLabelValues(source, user1).Add(2000) + handler.queryPeakSamples.WithLabelValues(source, user1).Observe(500) + handler.queryChunkBytes.WithLabelValues(source, user1).Add(1024) + handler.queryDataBytes.WithLabelValues(source, user1).Add(2048) + handler.rejectedQueries.WithLabelValues(reasonTooManySamples, source, user1).Add(5) + + // Simulate activity for user2 + handler.querySeconds.WithLabelValues(source, user2).Add(2.0) + handler.queryFetchedSeries.WithLabelValues(source, user2).Add(200) + handler.queryFetchedSamples.WithLabelValues(source, user2).Add(2000) + handler.queryScannedSamples.WithLabelValues(source, user2).Add(4000) + handler.queryPeakSamples.WithLabelValues(source, user2).Observe(1000) + handler.queryChunkBytes.WithLabelValues(source, user2).Add(2048) + handler.queryDataBytes.WithLabelValues(source, user2).Add(4096) + handler.rejectedQueries.WithLabelValues(reasonTooManySamples, source, user2).Add(10) + + // Verify initial state - both users should have metrics + require.NoError(t, promtest.GatherAndCompare(reg, strings.NewReader(` + # HELP cortex_query_seconds_total Total amount of wall clock time spend processing queries. + # TYPE cortex_query_seconds_total counter + cortex_query_seconds_total{source="api",user="user1"} 1 + cortex_query_seconds_total{source="api",user="user2"} 2 + # HELP cortex_query_fetched_series_total Number of series fetched to execute a query. + # TYPE cortex_query_fetched_series_total counter + cortex_query_fetched_series_total{source="api",user="user1"} 100 + cortex_query_fetched_series_total{source="api",user="user2"} 200 + # HELP cortex_query_samples_total Number of samples fetched to execute a query. 
+ # TYPE cortex_query_samples_total counter + cortex_query_samples_total{source="api",user="user1"} 1000 + cortex_query_samples_total{source="api",user="user2"} 2000 + # HELP cortex_query_samples_scanned_total Number of samples scanned to execute a query. + # TYPE cortex_query_samples_scanned_total counter + cortex_query_samples_scanned_total{source="api",user="user1"} 2000 + cortex_query_samples_scanned_total{source="api",user="user2"} 4000 + # HELP cortex_query_peak_samples Highest count of samples considered to execute a query. + # TYPE cortex_query_peak_samples histogram + cortex_query_peak_samples_bucket{source="api",user="user1",le="+Inf"} 1 + cortex_query_peak_samples_sum{source="api",user="user1"} 500 + cortex_query_peak_samples_count{source="api",user="user1"} 1 + cortex_query_peak_samples_bucket{source="api",user="user2",le="+Inf"} 1 + cortex_query_peak_samples_sum{source="api",user="user2"} 1000 + cortex_query_peak_samples_count{source="api",user="user2"} 1 + # HELP cortex_query_fetched_chunks_bytes_total Size of all chunks fetched to execute a query in bytes. + # TYPE cortex_query_fetched_chunks_bytes_total counter + cortex_query_fetched_chunks_bytes_total{source="api",user="user1"} 1024 + cortex_query_fetched_chunks_bytes_total{source="api",user="user2"} 2048 + # HELP cortex_query_fetched_data_bytes_total Size of all data fetched to execute a query in bytes. + # TYPE cortex_query_fetched_data_bytes_total counter + cortex_query_fetched_data_bytes_total{source="api",user="user1"} 2048 + cortex_query_fetched_data_bytes_total{source="api",user="user2"} 4096 + # HELP cortex_rejected_queries_total The total number of queries that were rejected. + # TYPE cortex_rejected_queries_total counter + cortex_rejected_queries_total{reason="too_many_samples",source="api",user="user1"} 5 + cortex_rejected_queries_total{reason="too_many_samples",source="api",user="user2"} 10 + `), "cortex_query_seconds_total", "cortex_query_fetched_series_total", "cortex_query_samples_total", + "cortex_query_samples_scanned_total", "cortex_query_peak_samples", "cortex_query_fetched_chunks_bytes_total", + "cortex_query_fetched_data_bytes_total", "cortex_rejected_queries_total")) + + // Clean up metrics for user1 + handler.cleanupMetricsForInactiveUser(user1) + + // Verify final state - only user2 should have metrics + require.NoError(t, promtest.GatherAndCompare(reg, strings.NewReader(` + # HELP cortex_query_seconds_total Total amount of wall clock time spend processing queries. + # TYPE cortex_query_seconds_total counter + cortex_query_seconds_total{source="api",user="user2"} 2 + # HELP cortex_query_fetched_series_total Number of series fetched to execute a query. + # TYPE cortex_query_fetched_series_total counter + cortex_query_fetched_series_total{source="api",user="user2"} 200 + # HELP cortex_query_samples_total Number of samples fetched to execute a query. + # TYPE cortex_query_samples_total counter + cortex_query_samples_total{source="api",user="user2"} 2000 + # HELP cortex_query_samples_scanned_total Number of samples scanned to execute a query. + # TYPE cortex_query_samples_scanned_total counter + cortex_query_samples_scanned_total{source="api",user="user2"} 4000 + # HELP cortex_query_peak_samples Highest count of samples considered to execute a query. 
+ # TYPE cortex_query_peak_samples histogram + cortex_query_peak_samples_bucket{source="api",user="user2",le="+Inf"} 1 + cortex_query_peak_samples_sum{source="api",user="user2"} 1000 + cortex_query_peak_samples_count{source="api",user="user2"} 1 + # HELP cortex_query_fetched_chunks_bytes_total Size of all chunks fetched to execute a query in bytes. + # TYPE cortex_query_fetched_chunks_bytes_total counter + cortex_query_fetched_chunks_bytes_total{source="api",user="user2"} 2048 + # HELP cortex_query_fetched_data_bytes_total Size of all data fetched to execute a query in bytes. + # TYPE cortex_query_fetched_data_bytes_total counter + cortex_query_fetched_data_bytes_total{source="api",user="user2"} 4096 + # HELP cortex_rejected_queries_total The total number of queries that were rejected. + # TYPE cortex_rejected_queries_total counter + cortex_rejected_queries_total{reason="too_many_samples",source="api",user="user2"} 10 + `), "cortex_query_seconds_total", "cortex_query_fetched_series_total", "cortex_query_samples_total", + "cortex_query_samples_scanned_total", "cortex_query_peak_samples", "cortex_query_fetched_chunks_bytes_total", + "cortex_query_fetched_data_bytes_total", "cortex_rejected_queries_total")) +} diff --git a/pkg/ingester/active_series.go b/pkg/ingester/active_series.go index 5285f279639..1c3bf4c6d86 100644 --- a/pkg/ingester/active_series.go +++ b/pkg/ingester/active_series.go @@ -25,15 +25,17 @@ type activeSeriesStripe struct { // without holding the lock -- hence the atomic). oldestEntryTs atomic.Int64 - mu sync.RWMutex - refs map[uint64][]activeSeriesEntry - active int // Number of active entries in this stripe. Only decreased during purge or clear. + mu sync.RWMutex + refs map[uint64][]activeSeriesEntry + active int // Number of active entries in this stripe. Only decreased during purge or clear. + activeNativeHistogram int // Number of active entries only for Native Histogram in this stripe. Only decreased during purge or clear. } // activeSeriesEntry holds a timestamp for single series. type activeSeriesEntry struct { - lbs labels.Labels - nanos *atomic.Int64 // Unix timestamp in nanoseconds. Needs to be a pointer because we don't store pointers to entries in the stripe. + lbs labels.Labels + nanos *atomic.Int64 // Unix timestamp in nanoseconds. Needs to be a pointer because we don't store pointers to entries in the stripe. + isNativeHistogram bool } func NewActiveSeries() *ActiveSeries { @@ -48,10 +50,10 @@ func NewActiveSeries() *ActiveSeries { } // Updates series timestamp to 'now'. Function is called to make a copy of labels if entry doesn't exist yet. -func (c *ActiveSeries) UpdateSeries(series labels.Labels, hash uint64, now time.Time, labelsCopy func(labels.Labels) labels.Labels) { +func (c *ActiveSeries) UpdateSeries(series labels.Labels, hash uint64, now time.Time, nativeHistogram bool, labelsCopy func(labels.Labels) labels.Labels) { stripeID := hash % numActiveSeriesStripes - c.stripes[stripeID].updateSeriesTimestamp(now, series, hash, labelsCopy) + c.stripes[stripeID].updateSeriesTimestamp(now, series, hash, nativeHistogram, labelsCopy) } // Purge removes expired entries from the cache. 
This function should be called @@ -77,13 +79,21 @@ func (c *ActiveSeries) Active() int { return total } -func (s *activeSeriesStripe) updateSeriesTimestamp(now time.Time, series labels.Labels, fingerprint uint64, labelsCopy func(labels.Labels) labels.Labels) { +func (c *ActiveSeries) ActiveNativeHistogram() int { + total := 0 + for s := 0; s < numActiveSeriesStripes; s++ { + total += c.stripes[s].getActiveNativeHistogram() + } + return total +} + +func (s *activeSeriesStripe) updateSeriesTimestamp(now time.Time, series labels.Labels, fingerprint uint64, nativeHistogram bool, labelsCopy func(labels.Labels) labels.Labels) { nowNanos := now.UnixNano() e := s.findEntryForSeries(fingerprint, series) entryTimeSet := false if e == nil { - e, entryTimeSet = s.findOrCreateEntryForSeries(fingerprint, series, nowNanos, labelsCopy) + e, entryTimeSet = s.findOrCreateEntryForSeries(fingerprint, series, nowNanos, nativeHistogram, labelsCopy) } if !entryTimeSet { @@ -117,7 +127,7 @@ func (s *activeSeriesStripe) findEntryForSeries(fingerprint uint64, series label return nil } -func (s *activeSeriesStripe) findOrCreateEntryForSeries(fingerprint uint64, series labels.Labels, nowNanos int64, labelsCopy func(labels.Labels) labels.Labels) (*atomic.Int64, bool) { +func (s *activeSeriesStripe) findOrCreateEntryForSeries(fingerprint uint64, series labels.Labels, nowNanos int64, nativeHistogram bool, labelsCopy func(labels.Labels) labels.Labels) (*atomic.Int64, bool) { s.mu.Lock() defer s.mu.Unlock() @@ -129,9 +139,13 @@ func (s *activeSeriesStripe) findOrCreateEntryForSeries(fingerprint uint64, seri } s.active++ + if nativeHistogram { + s.activeNativeHistogram++ + } e := activeSeriesEntry{ - lbs: labelsCopy(series), - nanos: atomic.NewInt64(nowNanos), + lbs: labelsCopy(series), + nanos: atomic.NewInt64(nowNanos), + isNativeHistogram: nativeHistogram, } s.refs[fingerprint] = append(s.refs[fingerprint], e) @@ -160,6 +174,7 @@ func (s *activeSeriesStripe) purge(keepUntil time.Time) { defer s.mu.Unlock() active := 0 + activeNativeHistogram := 0 oldest := int64(math.MaxInt64) for fp, entries := range s.refs { @@ -173,6 +188,9 @@ func (s *activeSeriesStripe) purge(keepUntil time.Time) { } active++ + if entries[0].isNativeHistogram { + activeNativeHistogram++ + } if ts < oldest { oldest = ts } @@ -199,6 +217,11 @@ func (s *activeSeriesStripe) purge(keepUntil time.Time) { delete(s.refs, fp) } else { active += cnt + for _, e := range entries { + if e.isNativeHistogram { + activeNativeHistogram++ + } + } s.refs[fp] = entries } } @@ -209,6 +232,7 @@ func (s *activeSeriesStripe) purge(keepUntil time.Time) { s.oldestEntryTs.Store(oldest) } s.active = active + s.activeNativeHistogram = activeNativeHistogram } func (s *activeSeriesStripe) getActive() int { @@ -217,3 +241,10 @@ func (s *activeSeriesStripe) getActive() int { return s.active } + +func (s *activeSeriesStripe) getActiveNativeHistogram() int { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.activeNativeHistogram +} diff --git a/pkg/ingester/active_series_test.go b/pkg/ingester/active_series_test.go index dc97b0d4621..3d84d7570cc 100644 --- a/pkg/ingester/active_series_test.go +++ b/pkg/ingester/active_series_test.go @@ -26,16 +26,20 @@ func TestActiveSeries_UpdateSeries(t *testing.T) { c := NewActiveSeries() assert.Equal(t, 0, c.Active()) + assert.Equal(t, 0, c.ActiveNativeHistogram()) labels1Hash := fromLabelToLabels(ls1).Hash() labels2Hash := fromLabelToLabels(ls2).Hash() - c.UpdateSeries(ls1, labels1Hash, time.Now(), copyFn) + c.UpdateSeries(ls1, labels1Hash, 
time.Now(), true, copyFn) assert.Equal(t, 1, c.Active()) + assert.Equal(t, 1, c.ActiveNativeHistogram()) - c.UpdateSeries(ls1, labels1Hash, time.Now(), copyFn) + c.UpdateSeries(ls1, labels1Hash, time.Now(), true, copyFn) assert.Equal(t, 1, c.Active()) + assert.Equal(t, 1, c.ActiveNativeHistogram()) - c.UpdateSeries(ls2, labels2Hash, time.Now(), copyFn) + c.UpdateSeries(ls2, labels2Hash, time.Now(), true, copyFn) assert.Equal(t, 2, c.Active()) + assert.Equal(t, 2, c.ActiveNativeHistogram()) } func TestActiveSeries_Purge(t *testing.T) { @@ -52,7 +56,7 @@ func TestActiveSeries_Purge(t *testing.T) { c := NewActiveSeries() for i := 0; i < len(series); i++ { - c.UpdateSeries(series[i], fromLabelToLabels(series[i]).Hash(), time.Unix(int64(i), 0), copyFn) + c.UpdateSeries(series[i], fromLabelToLabels(series[i]).Hash(), time.Unix(int64(i), 0), true, copyFn) } c.Purge(time.Unix(int64(ttl+1), 0)) @@ -61,6 +65,7 @@ func TestActiveSeries_Purge(t *testing.T) { exp := len(series) - (ttl + 1) assert.Equal(t, exp, c.Active()) + assert.Equal(t, exp, c.ActiveNativeHistogram()) } } @@ -71,23 +76,26 @@ func TestActiveSeries_PurgeOpt(t *testing.T) { c := NewActiveSeries() now := time.Now() - c.UpdateSeries(ls1, ls1.Hash(), now.Add(-2*time.Minute), copyFn) - c.UpdateSeries(ls2, ls2.Hash(), now, copyFn) + c.UpdateSeries(ls1, ls1.Hash(), now.Add(-2*time.Minute), true, copyFn) + c.UpdateSeries(ls2, ls2.Hash(), now, true, copyFn) c.Purge(now) assert.Equal(t, 1, c.Active()) + assert.Equal(t, 1, c.ActiveNativeHistogram()) - c.UpdateSeries(ls1, ls1.Hash(), now.Add(-1*time.Minute), copyFn) - c.UpdateSeries(ls2, ls2.Hash(), now, copyFn) + c.UpdateSeries(ls1, ls1.Hash(), now.Add(-1*time.Minute), true, copyFn) + c.UpdateSeries(ls2, ls2.Hash(), now, true, copyFn) c.Purge(now) assert.Equal(t, 1, c.Active()) + assert.Equal(t, 1, c.ActiveNativeHistogram()) // This will *not* update the series, since there is already newer timestamp. 
- c.UpdateSeries(ls2, ls2.Hash(), now.Add(-1*time.Minute), copyFn) + c.UpdateSeries(ls2, ls2.Hash(), now.Add(-1*time.Minute), true, copyFn) c.Purge(now) assert.Equal(t, 1, c.Active()) + assert.Equal(t, 1, c.ActiveNativeHistogram()) } var activeSeriesTestGoroutines = []int{50, 100, 500} @@ -121,7 +129,7 @@ func benchmarkActiveSeriesConcurrencySingleSeries(b *testing.B, goroutines int) for ix := 0; ix < max; ix++ { now = now.Add(time.Duration(ix) * time.Millisecond) - c.UpdateSeries(series, labelhash, now, copyFn) + c.UpdateSeries(series, labelhash, now, false, copyFn) } }() } @@ -152,7 +160,7 @@ func BenchmarkActiveSeries_UpdateSeries(b *testing.B) { b.ResetTimer() for ix := 0; ix < b.N; ix++ { - c.UpdateSeries(series[ix], labelhash[ix], time.Unix(0, now+int64(ix)), copyFn) + c.UpdateSeries(series[ix], labelhash[ix], time.Unix(0, now+int64(ix)), false, copyFn) } } @@ -184,9 +192,9 @@ func benchmarkPurge(b *testing.B, twice bool) { // Prepare series for ix, s := range series { if ix < numExpiresSeries { - c.UpdateSeries(s, labelhash[ix], now.Add(-time.Minute), copyFn) + c.UpdateSeries(s, labelhash[ix], now.Add(-time.Minute), false, copyFn) } else { - c.UpdateSeries(s, labelhash[ix], now, copyFn) + c.UpdateSeries(s, labelhash[ix], now, false, copyFn) } } diff --git a/pkg/ingester/client/ingester.pb.go b/pkg/ingester/client/ingester.pb.go index 374348afae7..ae8937d9ed5 100644 --- a/pkg/ingester/client/ingester.pb.go +++ b/pkg/ingester/client/ingester.pb.go @@ -1077,6 +1077,9 @@ func (m *MetricsForLabelMatchersStreamResponse) GetMetric() []*cortexpb.Metric { } type MetricsMetadataRequest struct { + Limit int64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` + LimitPerMetric int64 `protobuf:"varint,2,opt,name=limit_per_metric,json=limitPerMetric,proto3" json:"limit_per_metric,omitempty"` + Metric string `protobuf:"bytes,3,opt,name=metric,proto3" json:"metric,omitempty"` } func (m *MetricsMetadataRequest) Reset() { *m = MetricsMetadataRequest{} } @@ -1111,6 +1114,27 @@ func (m *MetricsMetadataRequest) XXX_DiscardUnknown() { var xxx_messageInfo_MetricsMetadataRequest proto.InternalMessageInfo +func (m *MetricsMetadataRequest) GetLimit() int64 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *MetricsMetadataRequest) GetLimitPerMetric() int64 { + if m != nil { + return m.LimitPerMetric + } + return 0 +} + +func (m *MetricsMetadataRequest) GetMetric() string { + if m != nil { + return m.Metric + } + return "" +} + type MetricsMetadataResponse struct { Metadata []*cortexpb.MetricMetadata `protobuf:"bytes,1,rep,name=metadata,proto3" json:"metadata,omitempty"` } @@ -1484,91 +1508,93 @@ func init() { func init() { proto.RegisterFile("ingester.proto", fileDescriptor_60f6df4f3586b478) } var fileDescriptor_60f6df4f3586b478 = []byte{ - // 1339 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4b, 0x6f, 0x14, 0xc7, + // 1369 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x6f, 0x14, 0xc7, 0x13, 0xdf, 0xf1, 0x3e, 0xec, 0xad, 0x7d, 0xb0, 0x6e, 0x1b, 0xbc, 0x0c, 0x7f, 0xc6, 0x30, 0x88, 0x7f, 0xac, 0x24, 0xd8, 0xe0, 0x24, 0x12, 0xe4, 0x85, 0x6c, 0x30, 0x60, 0xc0, 0x18, 0xc6, 0x86, - 0x44, 0x51, 0xa2, 0xd1, 0x78, 0xb7, 0xb1, 0x27, 0xcc, 0x63, 0x99, 0xee, 0x41, 0x90, 0x53, 0xa2, - 0x7c, 0x80, 0xe4, 0x98, 0x6b, 0x6e, 0xf9, 0x00, 0xf9, 0x10, 0x1c, 0x39, 0xe4, 0x80, 0x72, 0x40, - 0x61, 0x91, 0xa2, 0x1c, 0xc9, 0x37, 0x88, 0xa6, 0x1f, 0xf3, 0xf2, 0xf8, 
0x41, 0x04, 0xb9, 0xed, - 0x54, 0xfd, 0xaa, 0xba, 0xea, 0xd7, 0x55, 0x5d, 0xb5, 0xd0, 0xb6, 0xbd, 0x4d, 0x4c, 0x28, 0x0e, - 0x66, 0x07, 0x81, 0x4f, 0x7d, 0x54, 0xeb, 0xf9, 0x01, 0xc5, 0x0f, 0xd5, 0xc9, 0x4d, 0x7f, 0xd3, - 0x67, 0xa2, 0xb9, 0xe8, 0x17, 0xd7, 0xaa, 0xe7, 0x36, 0x6d, 0xba, 0x15, 0x6e, 0xcc, 0xf6, 0x7c, - 0x77, 0x8e, 0x03, 0x07, 0x81, 0xff, 0x35, 0xee, 0x51, 0xf1, 0x35, 0x37, 0xb8, 0xb7, 0x29, 0x15, - 0x1b, 0xe2, 0x07, 0x37, 0xd5, 0x3f, 0x81, 0x86, 0x81, 0xad, 0xbe, 0x81, 0xef, 0x87, 0x98, 0x50, - 0x34, 0x0b, 0xa3, 0xf7, 0x43, 0x1c, 0xd8, 0x98, 0x74, 0x95, 0x63, 0xe5, 0x99, 0xc6, 0xfc, 0xe4, - 0xac, 0x80, 0xdf, 0x0a, 0x71, 0xf0, 0x48, 0xc0, 0x0c, 0x09, 0xd2, 0xcf, 0x43, 0x93, 0x9b, 0x93, - 0x81, 0xef, 0x11, 0x8c, 0xe6, 0x60, 0x34, 0xc0, 0x24, 0x74, 0xa8, 0xb4, 0x3f, 0x98, 0xb3, 0xe7, - 0x38, 0x43, 0xa2, 0xf4, 0x6b, 0xd0, 0xca, 0x68, 0xd0, 0x87, 0x00, 0xd4, 0x76, 0x31, 0x29, 0x0a, - 0x62, 0xb0, 0x31, 0xbb, 0x6e, 0xbb, 0x78, 0x8d, 0xe9, 0x16, 0x2b, 0x8f, 0x9f, 0x4d, 0x97, 0x8c, - 0x14, 0x5a, 0xff, 0x49, 0x81, 0x66, 0x3a, 0x4e, 0xf4, 0x2e, 0x20, 0x42, 0xad, 0x80, 0x9a, 0x0c, - 0x44, 0x2d, 0x77, 0x60, 0xba, 0x91, 0x53, 0x65, 0xa6, 0x6c, 0x74, 0x98, 0x66, 0x5d, 0x2a, 0x56, - 0x08, 0x9a, 0x81, 0x0e, 0xf6, 0xfa, 0x59, 0xec, 0x08, 0xc3, 0xb6, 0xb1, 0xd7, 0x4f, 0x23, 0x4f, - 0xc3, 0x98, 0x6b, 0xd1, 0xde, 0x16, 0x0e, 0x48, 0xb7, 0x9c, 0xe5, 0xe9, 0xba, 0xb5, 0x81, 0x9d, - 0x15, 0xae, 0x34, 0x62, 0x94, 0xfe, 0xb3, 0x02, 0x93, 0x4b, 0x0f, 0xb1, 0x3b, 0x70, 0xac, 0xe0, - 0x3f, 0x09, 0xf1, 0xcc, 0xb6, 0x10, 0x0f, 0x16, 0x85, 0x48, 0x52, 0x31, 0x7e, 0x09, 0x13, 0x2c, - 0xb4, 0x35, 0x1a, 0x60, 0xcb, 0x8d, 0x6f, 0xe4, 0x3c, 0x34, 0x7a, 0x5b, 0xa1, 0x77, 0x2f, 0x73, - 0x25, 0x53, 0xd2, 0x59, 0x72, 0x21, 0x17, 0x22, 0x90, 0xb8, 0x95, 0xb4, 0xc5, 0xd5, 0xca, 0xd8, - 0x48, 0xa7, 0xac, 0xaf, 0xc1, 0xc1, 0x1c, 0x01, 0xaf, 0xe1, 0xc6, 0x7f, 0x53, 0x00, 0xb1, 0x74, - 0xee, 0x58, 0x4e, 0x88, 0x89, 0x24, 0xf5, 0x28, 0x80, 0x13, 0x49, 0x4d, 0xcf, 0x72, 0x31, 0x23, - 0xb3, 0x6e, 0xd4, 0x99, 0xe4, 0x86, 0xe5, 0xe2, 0x1d, 0x38, 0x1f, 0x79, 0x05, 0xce, 0xcb, 0x7b, - 0x72, 0x5e, 0x39, 0xa6, 0xec, 0x83, 0x73, 0x34, 0x09, 0x55, 0xc7, 0x76, 0x6d, 0xda, 0xad, 0x32, - 0x8f, 0xfc, 0x43, 0x3f, 0x0b, 0x13, 0x99, 0xac, 0x04, 0x53, 0xc7, 0xa1, 0xc9, 0xd3, 0x7a, 0xc0, - 0xe4, 0x8c, 0xab, 0xba, 0xd1, 0x70, 0x12, 0xa8, 0xfe, 0x29, 0x1c, 0x4e, 0x59, 0xe6, 0x6e, 0x72, - 0x1f, 0xf6, 0xbf, 0x2a, 0x30, 0x7e, 0x5d, 0x12, 0x45, 0xde, 0x74, 0x91, 0xc6, 0xd9, 0x97, 0x53, - 0xd9, 0xff, 0x0b, 0x1a, 0xf5, 0x0f, 0x44, 0x19, 0x88, 0xa8, 0x45, 0xbe, 0xd3, 0xd0, 0x48, 0xca, - 0x40, 0xa6, 0x0b, 0x71, 0x1d, 0x10, 0xfd, 0x23, 0xe8, 0x26, 0x66, 0x39, 0xb2, 0xf6, 0x34, 0x46, - 0xd0, 0xb9, 0x4d, 0x70, 0xb0, 0x46, 0x2d, 0x2a, 0x89, 0xd2, 0xbf, 0x1b, 0x81, 0xf1, 0x94, 0x50, - 0xb8, 0x3a, 0x29, 0xdf, 0x73, 0xdb, 0xf7, 0xcc, 0xc0, 0xa2, 0xbc, 0x24, 0x15, 0xa3, 0x15, 0x4b, - 0x0d, 0x8b, 0xe2, 0xa8, 0x6a, 0xbd, 0xd0, 0x35, 0x45, 0x23, 0x44, 0x8c, 0x55, 0x8c, 0xba, 0x17, - 0xba, 0xbc, 0xfa, 0xa3, 0x4b, 0xb0, 0x06, 0xb6, 0x99, 0xf3, 0x54, 0x66, 0x9e, 0x3a, 0xd6, 0xc0, - 0x5e, 0xce, 0x38, 0x9b, 0x85, 0x89, 0x20, 0x74, 0x70, 0x1e, 0x5e, 0x61, 0xf0, 0xf1, 0x48, 0x95, - 0xc5, 0x9f, 0x80, 0x96, 0xd5, 0xa3, 0xf6, 0x03, 0x2c, 0xcf, 0xaf, 0xb2, 0xf3, 0x9b, 0x5c, 0x28, - 0x42, 0x38, 0x01, 0x2d, 0xc7, 0xb7, 0xfa, 0xb8, 0x6f, 0x6e, 0x38, 0x7e, 0xef, 0x1e, 0xe9, 0xd6, - 0x38, 0x88, 0x0b, 0x17, 0x99, 0x4c, 0xff, 0x0a, 0x26, 0x22, 0x0a, 0x96, 0x2f, 0x66, 0x49, 0x98, - 0x82, 0xd1, 0x90, 0xe0, 0xc0, 0xb4, 0xfb, 0xa2, 0x21, 0x6b, 0xd1, 0xe7, 0x72, 0x1f, 0x9d, 0x82, - 
0x4a, 0xdf, 0xa2, 0x16, 0x4b, 0xb8, 0x31, 0x7f, 0x58, 0x5e, 0xf5, 0x36, 0x1a, 0x0d, 0x06, 0xd3, - 0x2f, 0x03, 0x8a, 0x54, 0x24, 0xeb, 0xfd, 0x0c, 0x54, 0x49, 0x24, 0x10, 0xef, 0xc7, 0x91, 0xb4, - 0x97, 0x5c, 0x24, 0x06, 0x47, 0xea, 0x8f, 0x15, 0xd0, 0x56, 0x30, 0x0d, 0xec, 0x1e, 0xb9, 0xe4, - 0x07, 0xd9, 0xca, 0x7a, 0xc3, 0x75, 0x7f, 0x16, 0x9a, 0xb2, 0x74, 0x4d, 0x82, 0xe9, 0xee, 0x0f, - 0x74, 0x43, 0x42, 0xd7, 0x30, 0x4d, 0x3a, 0xa6, 0x92, 0x7e, 0x2f, 0xae, 0xc1, 0xf4, 0x8e, 0x99, + 0x44, 0x51, 0xa2, 0xd1, 0x78, 0xb7, 0xb1, 0x27, 0xcc, 0x8b, 0xe9, 0x5e, 0x04, 0x39, 0x25, 0xca, + 0x07, 0x48, 0x8e, 0xb9, 0xe6, 0x96, 0x0f, 0x90, 0x0f, 0xc1, 0x91, 0x43, 0x0e, 0x28, 0x07, 0x14, + 0x16, 0x29, 0xca, 0x91, 0x7c, 0x83, 0x68, 0xfa, 0x31, 0x2f, 0x8f, 0x1f, 0x44, 0x90, 0xdb, 0x74, + 0xd5, 0xaf, 0xaa, 0xab, 0x7e, 0x5d, 0xdd, 0x55, 0xbb, 0xd0, 0xb6, 0xbd, 0x4d, 0x4c, 0x28, 0x0e, + 0x67, 0x83, 0xd0, 0xa7, 0x3e, 0xaa, 0xf5, 0xfc, 0x90, 0xe2, 0x87, 0xea, 0xe4, 0xa6, 0xbf, 0xe9, + 0x33, 0xd1, 0x5c, 0xf4, 0xc5, 0xb5, 0xea, 0xb9, 0x4d, 0x9b, 0x6e, 0x0d, 0x36, 0x66, 0x7b, 0xbe, + 0x3b, 0xc7, 0x81, 0x41, 0xe8, 0x7f, 0x8d, 0x7b, 0x54, 0xac, 0xe6, 0x82, 0x7b, 0x9b, 0x52, 0xb1, + 0x21, 0x3e, 0xb8, 0xa9, 0xfe, 0x09, 0x34, 0x0c, 0x6c, 0xf5, 0x0d, 0x7c, 0x7f, 0x80, 0x09, 0x45, + 0xb3, 0x30, 0x7a, 0x7f, 0x80, 0x43, 0x1b, 0x93, 0xae, 0x72, 0xac, 0x3c, 0xd3, 0x98, 0x9f, 0x9c, + 0x15, 0xf0, 0x5b, 0x03, 0x1c, 0x3e, 0x12, 0x30, 0x43, 0x82, 0xf4, 0xf3, 0xd0, 0xe4, 0xe6, 0x24, + 0xf0, 0x3d, 0x82, 0xd1, 0x1c, 0x8c, 0x86, 0x98, 0x0c, 0x1c, 0x2a, 0xed, 0x0f, 0xe6, 0xec, 0x39, + 0xce, 0x90, 0x28, 0xfd, 0x1a, 0xb4, 0x32, 0x1a, 0xf4, 0x21, 0x00, 0xb5, 0x5d, 0x4c, 0x8a, 0x82, + 0x08, 0x36, 0x66, 0xd7, 0x6d, 0x17, 0xaf, 0x31, 0xdd, 0x62, 0xe5, 0xf1, 0xb3, 0xe9, 0x92, 0x91, + 0x42, 0xeb, 0x3f, 0x29, 0xd0, 0x4c, 0xc7, 0x89, 0xde, 0x05, 0x44, 0xa8, 0x15, 0x52, 0x93, 0x81, + 0xa8, 0xe5, 0x06, 0xa6, 0x1b, 0x39, 0x55, 0x66, 0xca, 0x46, 0x87, 0x69, 0xd6, 0xa5, 0x62, 0x85, + 0xa0, 0x19, 0xe8, 0x60, 0xaf, 0x9f, 0xc5, 0x8e, 0x30, 0x6c, 0x1b, 0x7b, 0xfd, 0x34, 0xf2, 0x34, + 0x8c, 0xb9, 0x16, 0xed, 0x6d, 0xe1, 0x90, 0x74, 0xcb, 0x59, 0x9e, 0xae, 0x5b, 0x1b, 0xd8, 0x59, + 0xe1, 0x4a, 0x23, 0x46, 0xe9, 0x3f, 0x2b, 0x30, 0xb9, 0xf4, 0x10, 0xbb, 0x81, 0x63, 0x85, 0xff, + 0x49, 0x88, 0x67, 0xb6, 0x85, 0x78, 0xb0, 0x28, 0x44, 0x92, 0x8a, 0xf1, 0x4b, 0x98, 0x60, 0xa1, + 0xad, 0xd1, 0x10, 0x5b, 0x6e, 0x7c, 0x22, 0xe7, 0xa1, 0xd1, 0xdb, 0x1a, 0x78, 0xf7, 0x32, 0x47, + 0x32, 0x25, 0x9d, 0x25, 0x07, 0x72, 0x21, 0x02, 0x89, 0x53, 0x49, 0x5b, 0x5c, 0xad, 0x8c, 0x8d, + 0x74, 0xca, 0xfa, 0x1a, 0x1c, 0xcc, 0x11, 0xf0, 0x1a, 0x4e, 0xfc, 0x37, 0x05, 0x10, 0x4b, 0xe7, + 0x8e, 0xe5, 0x0c, 0x30, 0x91, 0xa4, 0x1e, 0x05, 0x70, 0x22, 0xa9, 0xe9, 0x59, 0x2e, 0x66, 0x64, + 0xd6, 0x8d, 0x3a, 0x93, 0xdc, 0xb0, 0x5c, 0xbc, 0x03, 0xe7, 0x23, 0xaf, 0xc0, 0x79, 0x79, 0x4f, + 0xce, 0x2b, 0xc7, 0x94, 0x7d, 0x70, 0x8e, 0x26, 0xa1, 0xea, 0xd8, 0xae, 0x4d, 0xbb, 0x55, 0xe6, + 0x91, 0x2f, 0xf4, 0xb3, 0x30, 0x91, 0xc9, 0x4a, 0x30, 0x75, 0x1c, 0x9a, 0x3c, 0xad, 0x07, 0x4c, + 0xce, 0xb8, 0xaa, 0x1b, 0x0d, 0x27, 0x81, 0xea, 0x9f, 0xc2, 0xe1, 0x94, 0x65, 0xee, 0x24, 0xf7, + 0x61, 0xff, 0xab, 0x02, 0xe3, 0xd7, 0x25, 0x51, 0xe4, 0x4d, 0x17, 0x69, 0x9c, 0x7d, 0x39, 0x95, + 0xfd, 0xbf, 0xa0, 0x51, 0xff, 0x40, 0x94, 0x81, 0x88, 0x5a, 0xe4, 0x3b, 0x0d, 0x8d, 0xa4, 0x0c, + 0x64, 0xba, 0x10, 0xd7, 0x01, 0xd1, 0x3f, 0x82, 0x6e, 0x62, 0x96, 0x23, 0x6b, 0x4f, 0x63, 0x04, + 0x9d, 0xdb, 0x04, 0x87, 0x6b, 0xd4, 0xa2, 0x92, 0x28, 0xfd, 0xbb, 0x11, 0x18, 0x4f, 0x09, 0x85, + 0xab, 0x93, 0xf2, 0x3d, 
0xb7, 0x7d, 0xcf, 0x0c, 0x2d, 0xca, 0x4b, 0x52, 0x31, 0x5a, 0xb1, 0xd4, + 0xb0, 0x28, 0x8e, 0xaa, 0xd6, 0x1b, 0xb8, 0xa6, 0xb8, 0x08, 0x11, 0x63, 0x15, 0xa3, 0xee, 0x0d, + 0x5c, 0x5e, 0xfd, 0xd1, 0x21, 0x58, 0x81, 0x6d, 0xe6, 0x3c, 0x95, 0x99, 0xa7, 0x8e, 0x15, 0xd8, + 0xcb, 0x19, 0x67, 0xb3, 0x30, 0x11, 0x0e, 0x1c, 0x9c, 0x87, 0x57, 0x18, 0x7c, 0x3c, 0x52, 0x65, + 0xf1, 0x27, 0xa0, 0x65, 0xf5, 0xa8, 0xfd, 0x00, 0xcb, 0xfd, 0xab, 0x6c, 0xff, 0x26, 0x17, 0x8a, + 0x10, 0x4e, 0x40, 0xcb, 0xf1, 0xad, 0x3e, 0xee, 0x9b, 0x1b, 0x8e, 0xdf, 0xbb, 0x47, 0xba, 0x35, + 0x0e, 0xe2, 0xc2, 0x45, 0x26, 0xd3, 0xbf, 0x82, 0x89, 0x88, 0x82, 0xe5, 0x8b, 0x59, 0x12, 0xa6, + 0x60, 0x74, 0x40, 0x70, 0x68, 0xda, 0x7d, 0x71, 0x21, 0x6b, 0xd1, 0x72, 0xb9, 0x8f, 0x4e, 0x41, + 0xa5, 0x6f, 0x51, 0x8b, 0x25, 0xdc, 0x98, 0x3f, 0x2c, 0x8f, 0x7a, 0x1b, 0x8d, 0x06, 0x83, 0xe9, + 0x97, 0x01, 0x45, 0x2a, 0x92, 0xf5, 0x7e, 0x06, 0xaa, 0x24, 0x12, 0x88, 0xf7, 0xe3, 0x48, 0xda, + 0x4b, 0x2e, 0x12, 0x83, 0x23, 0xf5, 0xc7, 0x0a, 0x68, 0x2b, 0x98, 0x86, 0x76, 0x8f, 0x5c, 0xf2, + 0xc3, 0x6c, 0x65, 0xbd, 0xe1, 0xba, 0x3f, 0x0b, 0x4d, 0x59, 0xba, 0x26, 0xc1, 0x74, 0xf7, 0x07, + 0xba, 0x21, 0xa1, 0x6b, 0x98, 0x26, 0x37, 0xa6, 0x92, 0x7e, 0x2f, 0xae, 0xc1, 0xf4, 0x8e, 0x99, 0x08, 0x82, 0x66, 0xa0, 0xe6, 0x32, 0x88, 0x60, 0xa8, 0x93, 0xbc, 0xb0, 0xdc, 0xd4, 0x10, 0x7a, - 0xfd, 0x16, 0x9c, 0xdc, 0xc1, 0x59, 0xae, 0x43, 0xf6, 0xef, 0xb2, 0x0b, 0x87, 0x84, 0xcb, 0x15, - 0x4c, 0xad, 0xe8, 0x1a, 0x65, 0xc3, 0xac, 0xc2, 0xd4, 0x36, 0x8d, 0x70, 0xff, 0x3e, 0x8c, 0xb9, - 0x42, 0x26, 0x0e, 0xe8, 0xe6, 0x0f, 0x88, 0x6d, 0x62, 0xa4, 0xfe, 0xb7, 0x02, 0x07, 0x72, 0x33, - 0x29, 0xba, 0x98, 0xbb, 0x81, 0xef, 0x9a, 0x72, 0xa9, 0x4a, 0x6a, 0xb0, 0x1d, 0xc9, 0x97, 0x85, - 0x78, 0xb9, 0x9f, 0x2e, 0xd2, 0x91, 0x4c, 0x91, 0x7a, 0x50, 0x63, 0xad, 0x2f, 0x87, 0xe9, 0x44, - 0x12, 0x0a, 0xa3, 0xe8, 0xa6, 0x65, 0x07, 0x8b, 0x0b, 0xd1, 0x7c, 0xfa, 0xfd, 0xd9, 0xf4, 0x2b, - 0xed, 0x63, 0xdc, 0x7e, 0xa1, 0x6f, 0x0d, 0x28, 0x0e, 0x0c, 0x71, 0x0a, 0x7a, 0x07, 0x6a, 0x7c, - 0x84, 0x76, 0x2b, 0xec, 0xbc, 0x96, 0xac, 0x8d, 0xf4, 0x94, 0x15, 0x10, 0xfd, 0x07, 0x05, 0xaa, - 0x3c, 0xd3, 0x37, 0x55, 0xb0, 0x2a, 0x8c, 0x61, 0xaf, 0xe7, 0xf7, 0x6d, 0x6f, 0x93, 0xbd, 0x38, - 0x55, 0x23, 0xfe, 0x46, 0x48, 0xf4, 0x6f, 0x54, 0x91, 0x4d, 0xd1, 0xa4, 0x0b, 0xd0, 0xca, 0x54, - 0x4e, 0x66, 0x63, 0x52, 0xf6, 0xb5, 0x31, 0x99, 0xd0, 0x4c, 0x6b, 0xd0, 0x49, 0xa8, 0xd0, 0x47, - 0x03, 0xfe, 0x74, 0xb6, 0xe7, 0xc7, 0xa5, 0x35, 0x53, 0xaf, 0x3f, 0x1a, 0x60, 0x83, 0xa9, 0xa3, - 0x68, 0xd8, 0xd0, 0xe7, 0xd7, 0xc7, 0x7e, 0x47, 0x4d, 0xc3, 0x26, 0x1e, 0x0b, 0xbd, 0x6e, 0xf0, - 0x0f, 0xfd, 0x7b, 0x05, 0xda, 0x49, 0xa5, 0x5c, 0xb2, 0x1d, 0xfc, 0x3a, 0x0a, 0x45, 0x85, 0xb1, - 0xbb, 0xb6, 0x83, 0x59, 0x0c, 0xfc, 0xb8, 0xf8, 0xbb, 0x88, 0xa9, 0xb7, 0xaf, 0x42, 0x3d, 0x4e, - 0x01, 0xd5, 0xa1, 0xba, 0x74, 0xeb, 0xf6, 0xc2, 0xf5, 0x4e, 0x09, 0xb5, 0xa0, 0x7e, 0x63, 0x75, - 0xdd, 0xe4, 0x9f, 0x0a, 0x3a, 0x00, 0x0d, 0x63, 0xe9, 0xf2, 0xd2, 0xe7, 0xe6, 0xca, 0xc2, 0xfa, - 0x85, 0x2b, 0x9d, 0x11, 0x84, 0xa0, 0xcd, 0x05, 0x37, 0x56, 0x85, 0xac, 0x3c, 0xff, 0xe7, 0x28, - 0x8c, 0xc9, 0x18, 0xd1, 0x39, 0xa8, 0xdc, 0x0c, 0xc9, 0x16, 0x3a, 0x94, 0x54, 0xea, 0x67, 0x81, - 0x4d, 0xb1, 0xe8, 0x3c, 0x75, 0x6a, 0x9b, 0x9c, 0xf7, 0x9d, 0x5e, 0x42, 0x17, 0xa1, 0x91, 0x5a, - 0x04, 0x51, 0xe1, 0x7f, 0x00, 0xf5, 0x48, 0x46, 0x9a, 0x7d, 0x1a, 0xf4, 0xd2, 0x69, 0x05, 0xad, - 0x42, 0x9b, 0xa9, 0xe4, 0xd6, 0x47, 0xd0, 0xff, 0xa4, 0x49, 0xd1, 0x26, 0xac, 0x1e, 0xdd, 0x41, - 0x1b, 0x87, 0x75, 0x05, 0x1a, 0xa9, 0xdd, 0x06, 
0xa9, 0x99, 0x02, 0xca, 0x2c, 0x80, 0x49, 0x70, - 0x05, 0x6b, 0x94, 0x5e, 0x42, 0x77, 0xc4, 0x92, 0x93, 0xde, 0x92, 0x76, 0xf5, 0x77, 0xbc, 0x40, - 0x57, 0x90, 0xf2, 0x12, 0x40, 0xb2, 0x4f, 0xa0, 0xc3, 0x19, 0xa3, 0xf4, 0x42, 0xa5, 0xaa, 0x45, - 0xaa, 0x38, 0xbc, 0x35, 0xe8, 0xe4, 0xd7, 0x92, 0xdd, 0x9c, 0x1d, 0xdb, 0xae, 0x2a, 0x88, 0x6d, - 0x11, 0xea, 0xf1, 0x48, 0x45, 0xdd, 0x82, 0x29, 0xcb, 0x9d, 0xed, 0x3c, 0x7f, 0xf5, 0x12, 0xba, - 0x04, 0xcd, 0x05, 0xc7, 0xd9, 0x8f, 0x1b, 0x35, 0xad, 0x21, 0x79, 0x3f, 0x4e, 0xfc, 0xea, 0xe7, - 0x47, 0x0c, 0xfa, 0x7f, 0xdc, 0xd8, 0xbb, 0x8e, 0x66, 0xf5, 0xad, 0x3d, 0x71, 0xf1, 0x69, 0xdf, - 0xc0, 0xd1, 0x5d, 0x07, 0xda, 0xbe, 0xcf, 0x3c, 0xb5, 0x07, 0xae, 0x80, 0xf5, 0x75, 0x38, 0x90, - 0x9b, 0x6f, 0x48, 0xcb, 0x79, 0xc9, 0x8d, 0x44, 0x75, 0x7a, 0x47, 0xbd, 0xf4, 0xbb, 0xf8, 0xf1, - 0x93, 0xe7, 0x5a, 0xe9, 0xe9, 0x73, 0xad, 0xf4, 0xf2, 0xb9, 0xa6, 0x7c, 0x3b, 0xd4, 0x94, 0x5f, - 0x86, 0x9a, 0xf2, 0x78, 0xa8, 0x29, 0x4f, 0x86, 0x9a, 0xf2, 0xc7, 0x50, 0x53, 0xfe, 0x1a, 0x6a, - 0xa5, 0x97, 0x43, 0x4d, 0xf9, 0xf1, 0x85, 0x56, 0x7a, 0xf2, 0x42, 0x2b, 0x3d, 0x7d, 0xa1, 0x95, - 0xbe, 0xa8, 0xf5, 0x1c, 0x1b, 0x7b, 0x74, 0xa3, 0xc6, 0xfe, 0xfa, 0xbf, 0xf7, 0x4f, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x84, 0xf7, 0x8d, 0x61, 0x65, 0x10, 0x00, 0x00, + 0xfd, 0x16, 0x9c, 0xdc, 0xc1, 0x59, 0xee, 0x86, 0xec, 0xdf, 0x65, 0x00, 0x87, 0x84, 0xcb, 0x15, + 0x4c, 0xad, 0xe8, 0x18, 0x25, 0xc3, 0x71, 0x3e, 0x4a, 0xfa, 0x05, 0x98, 0x81, 0x0e, 0xfb, 0x30, + 0x03, 0x1c, 0x9a, 0x62, 0x0f, 0xc1, 0x24, 0x93, 0xdf, 0xc4, 0x21, 0xf7, 0x87, 0x0e, 0xc5, 0x31, + 0x94, 0x79, 0x51, 0x89, 0x1d, 0x57, 0x61, 0x6a, 0xdb, 0x8e, 0x22, 0xec, 0xf7, 0x61, 0xcc, 0x15, + 0x32, 0x11, 0x78, 0x37, 0x1f, 0x78, 0x6c, 0x13, 0x23, 0xf5, 0xbf, 0x15, 0x38, 0x90, 0xeb, 0x75, + 0x51, 0x98, 0x77, 0x43, 0xdf, 0x35, 0xe5, 0xb0, 0x96, 0xd4, 0x76, 0x3b, 0x92, 0x2f, 0x0b, 0xf1, + 0x72, 0x3f, 0x5d, 0xfc, 0x23, 0x99, 0xe2, 0xf7, 0xa0, 0xc6, 0x9e, 0x14, 0xd9, 0xa4, 0x27, 0x92, + 0x50, 0x18, 0xf5, 0x37, 0x2d, 0x3b, 0x5c, 0x5c, 0x88, 0xfa, 0xde, 0xef, 0xcf, 0xa6, 0x5f, 0x69, + 0xce, 0xe3, 0xf6, 0x0b, 0x7d, 0x2b, 0xa0, 0x38, 0x34, 0xc4, 0x2e, 0xe8, 0x1d, 0xa8, 0xf1, 0xd6, + 0xdc, 0xad, 0xb0, 0xfd, 0x5a, 0xb2, 0xe6, 0xd2, 0xdd, 0x5b, 0x40, 0xf4, 0x1f, 0x14, 0xa8, 0xf2, + 0x4c, 0xdf, 0xd4, 0x45, 0x50, 0x61, 0x0c, 0x7b, 0x3d, 0xbf, 0x6f, 0x7b, 0x9b, 0xec, 0x00, 0xab, + 0x46, 0xbc, 0x46, 0x48, 0xbc, 0x0b, 0x51, 0xa5, 0x37, 0xc5, 0xe5, 0x5f, 0x80, 0x56, 0xa6, 0x22, + 0x33, 0x93, 0x98, 0xb2, 0xaf, 0x49, 0xcc, 0x84, 0x66, 0x5a, 0x83, 0x4e, 0x42, 0x85, 0x3e, 0x0a, + 0xf8, 0x93, 0xdc, 0x9e, 0x1f, 0x97, 0xd6, 0x4c, 0xbd, 0xfe, 0x28, 0xc0, 0x06, 0x53, 0x47, 0xd1, + 0xb0, 0x61, 0x82, 0x1f, 0x1f, 0xfb, 0x8e, 0x8a, 0x97, 0x75, 0x52, 0x51, 0x7b, 0x7c, 0xa1, 0x7f, + 0xaf, 0x40, 0x3b, 0xa9, 0x94, 0x4b, 0xb6, 0x83, 0x5f, 0x47, 0xa1, 0xa8, 0x30, 0x76, 0xd7, 0x76, + 0x30, 0x8b, 0x81, 0x6f, 0x17, 0xaf, 0x8b, 0x98, 0x7a, 0xfb, 0x2a, 0xd4, 0xe3, 0x14, 0x50, 0x1d, + 0xaa, 0x4b, 0xb7, 0x6e, 0x2f, 0x5c, 0xef, 0x94, 0x50, 0x0b, 0xea, 0x37, 0x56, 0xd7, 0x4d, 0xbe, + 0x54, 0xd0, 0x01, 0x68, 0x18, 0x4b, 0x97, 0x97, 0x3e, 0x37, 0x57, 0x16, 0xd6, 0x2f, 0x5c, 0xe9, + 0x8c, 0x20, 0x04, 0x6d, 0x2e, 0xb8, 0xb1, 0x2a, 0x64, 0xe5, 0xf9, 0x3f, 0x47, 0x61, 0x4c, 0xc6, + 0x88, 0xce, 0x41, 0xe5, 0xe6, 0x80, 0x6c, 0xa1, 0x43, 0x49, 0xa5, 0x7e, 0x16, 0xda, 0x14, 0x8b, + 0x1b, 0xad, 0x4e, 0x6d, 0x93, 0xf3, 0x7b, 0xa7, 0x97, 0xd0, 0x45, 0x68, 0xa4, 0x06, 0x4c, 0x54, + 0xf8, 0xdb, 0x42, 0x3d, 0x92, 0x91, 0x66, 0x9f, 0x1c, 0xbd, 0x74, 0x5a, 0x41, 0xab, 0xd0, 0x66, + 0x2a, 
0x39, 0x4d, 0x12, 0xf4, 0x3f, 0x69, 0x52, 0x34, 0x61, 0xab, 0x47, 0x77, 0xd0, 0xc6, 0x61, + 0x5d, 0x81, 0x46, 0x6a, 0x66, 0x42, 0x6a, 0xa6, 0x80, 0x32, 0x83, 0x65, 0x12, 0x5c, 0xc1, 0x78, + 0xa6, 0x97, 0xd0, 0x1d, 0x31, 0x3c, 0xa5, 0xa7, 0xaf, 0x5d, 0xfd, 0x1d, 0x2f, 0xd0, 0x15, 0xa4, + 0xbc, 0x04, 0x90, 0xcc, 0x29, 0xe8, 0x70, 0xc6, 0x28, 0x3d, 0xa8, 0xa9, 0x6a, 0x91, 0x2a, 0x0e, + 0x6f, 0x0d, 0x3a, 0xf9, 0x71, 0x67, 0x37, 0x67, 0xc7, 0xb6, 0xab, 0x0a, 0x62, 0x5b, 0x84, 0x7a, + 0xdc, 0xaa, 0x51, 0xb7, 0xa0, 0x7b, 0x73, 0x67, 0x3b, 0xf7, 0x75, 0xbd, 0x84, 0x2e, 0x41, 0x73, + 0xc1, 0x71, 0xf6, 0xe3, 0x46, 0x4d, 0x6b, 0x48, 0xde, 0x8f, 0x13, 0xbf, 0xfa, 0xf9, 0xd6, 0x85, + 0xfe, 0x1f, 0x5f, 0xec, 0x5d, 0x5b, 0xbe, 0xfa, 0xd6, 0x9e, 0xb8, 0x78, 0xb7, 0x6f, 0xe0, 0xe8, + 0xae, 0x8d, 0x72, 0xdf, 0x7b, 0x9e, 0xda, 0x03, 0x57, 0xc0, 0xfa, 0x3a, 0x1c, 0xc8, 0xf5, 0x37, + 0xa4, 0xe5, 0xbc, 0xe4, 0x5a, 0xad, 0x3a, 0xbd, 0xa3, 0x5e, 0xfa, 0x5d, 0xfc, 0xf8, 0xc9, 0x73, + 0xad, 0xf4, 0xf4, 0xb9, 0x56, 0x7a, 0xf9, 0x5c, 0x53, 0xbe, 0x1d, 0x6a, 0xca, 0x2f, 0x43, 0x4d, + 0x79, 0x3c, 0xd4, 0x94, 0x27, 0x43, 0x4d, 0xf9, 0x63, 0xa8, 0x29, 0x7f, 0x0d, 0xb5, 0xd2, 0xcb, + 0xa1, 0xa6, 0xfc, 0xf8, 0x42, 0x2b, 0x3d, 0x79, 0xa1, 0x95, 0x9e, 0xbe, 0xd0, 0x4a, 0x5f, 0xd4, + 0x7a, 0x8e, 0x8d, 0x3d, 0xba, 0x51, 0x63, 0x7f, 0x29, 0xbc, 0xf7, 0x4f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x73, 0x37, 0x7c, 0x02, 0xbd, 0x10, 0x00, 0x00, } func (x MatchType) String() string { @@ -2209,6 +2235,15 @@ func (this *MetricsMetadataRequest) Equal(that interface{}) bool { } else if this == nil { return false } + if this.Limit != that1.Limit { + return false + } + if this.LimitPerMetric != that1.LimitPerMetric { + return false + } + if this.Metric != that1.Metric { + return false + } return true } func (this *MetricsMetadataResponse) Equal(that interface{}) bool { @@ -2671,8 +2706,11 @@ func (this *MetricsMetadataRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 4) + s := make([]string, 0, 7) s = append(s, "&client.MetricsMetadataRequest{") + s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") + s = append(s, "LimitPerMetric: "+fmt.Sprintf("%#v", this.LimitPerMetric)+",\n") + s = append(s, "Metric: "+fmt.Sprintf("%#v", this.Metric)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -4169,6 +4207,23 @@ func (m *MetricsMetadataRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + if len(m.Metric) > 0 { + i -= len(m.Metric) + copy(dAtA[i:], m.Metric) + i = encodeVarintIngester(dAtA, i, uint64(len(m.Metric))) + i-- + dAtA[i] = 0x1a + } + if m.LimitPerMetric != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.LimitPerMetric)) + i-- + dAtA[i] = 0x10 + } + if m.Limit != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x8 + } return len(dAtA) - i, nil } @@ -4813,6 +4868,16 @@ func (m *MetricsMetadataRequest) Size() (n int) { } var l int _ = l + if m.Limit != 0 { + n += 1 + sovIngester(uint64(m.Limit)) + } + if m.LimitPerMetric != 0 { + n += 1 + sovIngester(uint64(m.LimitPerMetric)) + } + l = len(m.Metric) + if l > 0 { + n += 1 + l + sovIngester(uint64(l)) + } return n } @@ -5227,6 +5292,9 @@ func (this *MetricsMetadataRequest) String() string { return "nil" } s := strings.Join([]string{`&MetricsMetadataRequest{`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, + `LimitPerMetric:` + fmt.Sprintf("%v", this.LimitPerMetric) + `,`, + `Metric:` + fmt.Sprintf("%v", this.Metric) + `,`, `}`, }, "") return s @@ -7425,6 +7493,76 @@ func (m 
*MetricsMetadataRequest) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: MetricsMetadataRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LimitPerMetric", wireType) + } + m.LimitPerMetric = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LimitPerMetric |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthIngester + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthIngester + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metric = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipIngester(dAtA[iNdEx:]) diff --git a/pkg/ingester/client/ingester.proto b/pkg/ingester/client/ingester.proto index 68f343693e6..0cbfac49c93 100644 --- a/pkg/ingester/client/ingester.proto +++ b/pkg/ingester/client/ingester.proto @@ -129,6 +129,9 @@ message MetricsForLabelMatchersStreamResponse { } message MetricsMetadataRequest { + int64 limit = 1; + int64 limit_per_metric = 2; + string metric = 3; } message MetricsMetadataResponse { diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 86a3a3dfdd6..e4eebef9911 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -56,8 +56,11 @@ import ( "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/concurrency" "github.com/cortexproject/cortex/pkg/util/extract" + "github.com/cortexproject/cortex/pkg/util/flagext" + "github.com/cortexproject/cortex/pkg/util/limiter" logutil "github.com/cortexproject/cortex/pkg/util/log" util_math "github.com/cortexproject/cortex/pkg/util/math" + "github.com/cortexproject/cortex/pkg/util/resource" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/cortexproject/cortex/pkg/util/validation" @@ -167,23 +170,17 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.ActiveSeriesMetricsIdleTimeout, "ingester.active-series-metrics-idle-timeout", 10*time.Minute, "After what time a series is considered to be inactive.") f.BoolVar(&cfg.UploadCompactedBlocksEnabled, "ingester.upload-compacted-blocks-enabled", true, "Enable uploading compacted blocks.") - f.Float64Var(&cfg.DefaultLimits.MaxIngestionRate, "ingester.instance-limits.max-ingestion-rate", 0, "Max ingestion rate (samples/sec) that ingester will accept. This limit is per-ingester, not per-tenant. Additional push requests will be rejected. 
Current ingestion rate is computed as exponentially weighted moving average, updated every second. This limit only works when using blocks engine. 0 = unlimited.") - f.Int64Var(&cfg.DefaultLimits.MaxInMemoryTenants, "ingester.instance-limits.max-tenants", 0, "Max users that this ingester can hold. Requests from additional users will be rejected. This limit only works when using blocks engine. 0 = unlimited.") - f.Int64Var(&cfg.DefaultLimits.MaxInMemorySeries, "ingester.instance-limits.max-series", 0, "Max series that this ingester can hold (across all tenants). Requests to create additional series will be rejected. This limit only works when using blocks engine. 0 = unlimited.") - f.Int64Var(&cfg.DefaultLimits.MaxInflightPushRequests, "ingester.instance-limits.max-inflight-push-requests", 0, "Max inflight push requests that this ingester can handle (across all tenants). Additional requests will be rejected. 0 = unlimited.") - f.Int64Var(&cfg.DefaultLimits.MaxInflightQueryRequests, "ingester.instance-limits.max-inflight-query-requests", 0, "Max inflight query requests that this ingester can handle (across all tenants). Additional requests will be rejected. 0 = unlimited.") - f.StringVar(&cfg.IgnoreSeriesLimitForMetricNames, "ingester.ignore-series-limit-for-metric-names", "", "Comma-separated list of metric names, for which -ingester.max-series-per-metric and -ingester.max-global-series-per-metric limits will be ignored. Does not affect max-series-per-user or max-global-series-per-metric limits.") - f.StringVar(&cfg.AdminLimitMessage, "ingester.admin-limit-message", "please contact administrator to raise it", "Customize the message contained in limit errors") - f.BoolVar(&cfg.LabelsStringInterningEnabled, "ingester.labels-string-interning-enabled", false, "Experimental: Enable string interning for metrics labels.") f.BoolVar(&cfg.DisableChunkTrimming, "ingester.disable-chunk-trimming", false, "Disable trimming of matching series chunks based on query Start and End time. When disabled, the result may contain samples outside the queried time range but select performances may be improved. Note that certain query results might change by changing this option.") f.IntVar(&cfg.MatchersCacheMaxItems, "ingester.matchers-cache-max-items", 0, "Maximum number of entries in the regex matchers cache. 0 to disable.") + + cfg.DefaultLimits.RegisterFlagsWithPrefix(f, "ingester.") } -func (cfg *Config) Validate() error { +func (cfg *Config) Validate(monitoredResources flagext.StringSliceCSV) error { if err := cfg.LifecyclerConfig.Validate(); err != nil { return err } @@ -192,6 +189,10 @@ func (cfg *Config) Validate() error { logutil.WarnExperimentalUse("String interning for metrics labels Enabled") } + if err := cfg.DefaultLimits.Validate(monitoredResources); err != nil { + return err + } + return nil } @@ -228,10 +229,11 @@ type Ingester struct { logger log.Logger - lifecycler *ring.Lifecycler - limits *validation.Overrides - limiter *Limiter - subservicesWatcher *services.FailureWatcher + lifecycler *ring.Lifecycler + limits *validation.Overrides + limiter *Limiter + resourceBasedLimiter *limiter.ResourceBasedLimiter + subservicesWatcher *services.FailureWatcher stoppedMtx sync.RWMutex // protects stopped stopped bool // protected by stoppedMtx @@ -699,7 +701,7 @@ func newTSDBState(bucketClient objstore.Bucket, registerer prometheus.Registerer } // New returns a new Ingester that uses Cortex block storage instead of chunks storage. 
-func New(cfg Config, limits *validation.Overrides, registerer prometheus.Registerer, logger log.Logger) (*Ingester, error) { +func New(cfg Config, limits *validation.Overrides, registerer prometheus.Registerer, logger log.Logger, resourceMonitor *resource.Monitor) (*Ingester, error) { defaultInstanceLimits = &cfg.DefaultLimits if cfg.ingesterClientFactory == nil { cfg.ingesterClientFactory = client.MakeIngesterClient @@ -780,6 +782,20 @@ func New(cfg Config, limits *validation.Overrides, registerer prometheus.Registe i.TSDBState.compactionIdleTimeout = util.DurationWithPositiveJitter(i.cfg.BlocksStorageConfig.TSDB.HeadCompactionIdleTimeout, compactionIdleTimeoutJitter) level.Info(i.logger).Log("msg", "TSDB idle compaction timeout set", "timeout", i.TSDBState.compactionIdleTimeout) + if resourceMonitor != nil { + resourceLimits := make(map[resource.Type]float64) + if cfg.DefaultLimits.CPUUtilization > 0 { + resourceLimits[resource.CPU] = cfg.DefaultLimits.CPUUtilization + } + if cfg.DefaultLimits.HeapUtilization > 0 { + resourceLimits[resource.Heap] = cfg.DefaultLimits.HeapUtilization + } + i.resourceBasedLimiter, err = limiter.NewResourceBasedLimiter(resourceMonitor, resourceLimits, registerer, "ingester") + if err != nil { + return nil, errors.Wrap(err, "error creating resource based limiter") + } + } + i.BasicService = services.NewBasicService(i.starting, i.updateLoop, i.stopping) return i, nil } @@ -1038,6 +1054,7 @@ func (i *Ingester) updateActiveSeries(ctx context.Context) { userDB.activeSeries.Purge(purgeTime) i.metrics.activeSeriesPerUser.WithLabelValues(userID).Set(float64(userDB.activeSeries.Active())) + i.metrics.activeNHSeriesPerUser.WithLabelValues(userID).Set(float64(userDB.activeSeries.ActiveNativeHistogram())) if err := userDB.labelSetCounter.UpdateMetric(ctx, userDB, i.metrics); err != nil { level.Warn(i.logger).Log("msg", "failed to update per labelSet metrics", "user", userID, "err", err) } @@ -1376,9 +1393,11 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte } else { discardedNativeHistogramCount += len(ts.Histograms) } - shouldUpdateSeries := (succeededSamplesCount > oldSucceededSamplesCount) || (succeededHistogramsCount > oldSucceededHistogramsCount) + + isNHAppended := succeededHistogramsCount > oldSucceededHistogramsCount + shouldUpdateSeries := (succeededSamplesCount > oldSucceededSamplesCount) || isNHAppended if i.cfg.ActiveSeriesMetricsEnabled && shouldUpdateSeries { - db.activeSeries.UpdateSeries(tsLabels, tsLabelsHash, startAppend, func(l labels.Labels) labels.Labels { + db.activeSeries.UpdateSeries(tsLabels, tsLabelsHash, startAppend, isNHAppended, func(l labels.Labels) labels.Labels { // we must already have copied the labels if succeededSamplesCount or succeededHistogramsCount has been incremented. return copiedLabels }) @@ -1950,7 +1969,7 @@ func (i *Ingester) metricsForLabelMatchersCommon(ctx context.Context, req *clien } // MetricsMetadata returns all the metric metadata of a user. 
-func (i *Ingester) MetricsMetadata(ctx context.Context, _ *client.MetricsMetadataRequest) (*client.MetricsMetadataResponse, error) { +func (i *Ingester) MetricsMetadata(ctx context.Context, req *client.MetricsMetadataRequest) (*client.MetricsMetadataResponse, error) { i.stoppedMtx.RLock() if err := i.checkRunningOrStopping(); err != nil { i.stoppedMtx.RUnlock() @@ -1969,7 +1988,7 @@ func (i *Ingester) MetricsMetadata(ctx context.Context, _ *client.MetricsMetadat return &client.MetricsMetadataResponse{}, nil } - return &client.MetricsMetadataResponse{Metadata: userMetadata.toClientMetadata()}, nil + return &client.MetricsMetadataResponse{Metadata: userMetadata.toClientMetadata(req)}, nil } // CheckReady is the readiness handler used to indicate to k8s when the ingesters @@ -2152,6 +2171,14 @@ func (i *Ingester) trackInflightQueryRequest() (func(), error) { } i.maxInflightQueryRequests.Track(i.inflightQueryRequests.Inc()) + + if i.resourceBasedLimiter != nil { + if err := i.resourceBasedLimiter.AcceptNewRequest(); err != nil { + level.Warn(i.logger).Log("msg", "failed to accept request", "err", err) + return nil, httpgrpc.Errorf(http.StatusTooManyRequests, "failed to query: %s", limiter.ErrResourceLimitReachedStr) + } + } + return func() { i.inflightQueryRequests.Dec() }, nil @@ -2386,7 +2413,7 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { instanceLimitsFn: i.getInstanceLimits, instanceSeriesCount: &i.TSDBState.seriesCount, - interner: util.NewLruInterner(), + interner: util.NewLruInterner(i.cfg.LabelsStringInterningEnabled), labelsStringInterningEnabled: i.cfg.LabelsStringInterningEnabled, blockRetentionPeriod: i.cfg.BlocksStorageConfig.TSDB.Retention.Milliseconds(), @@ -2526,6 +2553,7 @@ func (i *Ingester) closeAllTSDB() { i.metrics.memUsers.Dec() i.metrics.activeSeriesPerUser.DeleteLabelValues(userID) + i.metrics.activeNHSeriesPerUser.DeleteLabelValues(userID) }(userDB) } diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index c22668b828f..dda43c4ba97 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -58,6 +58,8 @@ import ( "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/chunkcompat" + "github.com/cortexproject/cortex/pkg/util/limiter" + "github.com/cortexproject/cortex/pkg/util/resource" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/test" "github.com/cortexproject/cortex/pkg/util/validation" @@ -834,7 +836,7 @@ func TestIngesterUserLimitExceeded(t *testing.T) { require.Equal(t, expected, res) // Verify metadata - m, err := ing.MetricsMetadata(ctx, nil) + m, err := ing.MetricsMetadata(ctx, &client.MetricsMetadataRequest{Limit: -1, LimitPerMetric: -1, Metric: ""}) require.NoError(t, err) assert.Equal(t, []*cortexpb.MetricMetadata{metadata1}, m.Metadata) @@ -965,7 +967,7 @@ func TestIngesterMetricLimitExceeded(t *testing.T) { assert.Equal(t, expected, res) // Verify metadata - m, err := ing.MetricsMetadata(ctx, nil) + m, err := ing.MetricsMetadata(ctx, &client.MetricsMetadataRequest{Limit: -1, LimitPerMetric: -1, Metric: ""}) require.NoError(t, err) assert.Equal(t, []*cortexpb.MetricMetadata{metadata1}, m.Metadata) @@ -1051,7 +1053,7 @@ func TestIngester_Push(t *testing.T) { expectedMetadataIngested: []*cortexpb.MetricMetadata{ {MetricFamilyName: "metric_name_2", Help: "a help for metric_name_2", Unit: "", Type: cortexpb.GAUGE}, }, - additionalMetrics: 
[]string{"cortex_discarded_samples_total", "cortex_ingester_active_series"}, + additionalMetrics: []string{"cortex_discarded_samples_total", "cortex_ingester_active_series", "cortex_ingester_active_native_histogram_series"}, disableNativeHistogram: true, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. @@ -1078,6 +1080,9 @@ func TestIngester_Push(t *testing.T) { # HELP cortex_ingester_active_series Number of currently active series per user. # TYPE cortex_ingester_active_series gauge cortex_ingester_active_series{user="test"} 1 + # HELP cortex_ingester_active_native_histogram_series Number of currently active native histogram series per user. + # TYPE cortex_ingester_active_native_histogram_series gauge + cortex_ingester_active_native_histogram_series{user="test"} 0 `, }, "should succeed on valid series and metadata": { @@ -1114,6 +1119,7 @@ func TestIngester_Push(t *testing.T) { "cortex_ingester_ingested_metadata_total", "cortex_ingester_ingested_metadata_failures_total", "cortex_ingester_active_series", + "cortex_ingester_active_native_histogram_series", }, expectedMetrics: ` # HELP cortex_ingester_ingested_metadata_failures_total The total number of metadata that errored on ingestion. @@ -1149,6 +1155,9 @@ func TestIngester_Push(t *testing.T) { # HELP cortex_ingester_active_series Number of currently active series per user. # TYPE cortex_ingester_active_series gauge cortex_ingester_active_series{user="test"} 1 + # HELP cortex_ingester_active_native_histogram_series Number of currently active native histogram series per user. + # TYPE cortex_ingester_active_native_histogram_series gauge + cortex_ingester_active_native_histogram_series{user="test"} 0 `, }, "should succeed on valid series with exemplars": { @@ -1212,6 +1221,7 @@ func TestIngester_Push(t *testing.T) { "cortex_ingester_tsdb_exemplar_last_exemplars_timestamp_seconds", "cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total", "cortex_ingester_active_series", + "cortex_ingester_active_native_histogram_series", }, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. @@ -1235,6 +1245,9 @@ func TestIngester_Push(t *testing.T) { # HELP cortex_ingester_active_series Number of currently active series per user. # TYPE cortex_ingester_active_series gauge cortex_ingester_active_series{user="test"} 1 + # HELP cortex_ingester_active_native_histogram_series Number of currently active native histogram series per user. + # TYPE cortex_ingester_active_native_histogram_series gauge + cortex_ingester_active_native_histogram_series{user="test"} 0 # HELP cortex_ingester_tsdb_exemplar_exemplars_appended_total Total number of TSDB exemplars appended. # TYPE cortex_ingester_tsdb_exemplar_exemplars_appended_total counter @@ -1324,6 +1337,7 @@ func TestIngester_Push(t *testing.T) { "cortex_ingester_tsdb_head_out_of_order_samples_appended_total", "cortex_discarded_samples_total", "cortex_ingester_active_series", + "cortex_ingester_active_native_histogram_series", }, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. @@ -1364,6 +1378,9 @@ func TestIngester_Push(t *testing.T) { # HELP cortex_ingester_active_series Number of currently active series per user. # TYPE cortex_ingester_active_series gauge cortex_ingester_active_series{user="test"} 1 + # HELP cortex_ingester_active_native_histogram_series Number of currently active native histogram series per user. 
+ # TYPE cortex_ingester_active_native_histogram_series gauge + cortex_ingester_active_native_histogram_series{user="test"} 0 `, }, "ooo disabled, should soft fail on sample out of bound": { @@ -1387,7 +1404,7 @@ func TestIngester_Push(t *testing.T) { expectedIngested: []cortexpb.TimeSeries{ {Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 2, TimestampMs: 1575043969}}}, }, - additionalMetrics: []string{"cortex_ingester_active_series"}, + additionalMetrics: []string{"cortex_ingester_active_series", "cortex_ingester_active_native_histogram_series"}, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. # TYPE cortex_ingester_ingested_samples_total counter @@ -1419,6 +1436,9 @@ func TestIngester_Push(t *testing.T) { # HELP cortex_ingester_active_series Number of currently active series per user. # TYPE cortex_ingester_active_series gauge cortex_ingester_active_series{user="test"} 1 + # HELP cortex_ingester_active_native_histogram_series Number of currently active native histogram series per user. + # TYPE cortex_ingester_active_native_histogram_series gauge + cortex_ingester_active_native_histogram_series{user="test"} 0 `, }, "ooo enabled, should soft fail on sample too old": { @@ -1444,6 +1464,7 @@ func TestIngester_Push(t *testing.T) { additionalMetrics: []string{ "cortex_discarded_samples_total", "cortex_ingester_active_series", + "cortex_ingester_active_native_histogram_series", }, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. @@ -1470,6 +1491,9 @@ func TestIngester_Push(t *testing.T) { # HELP cortex_ingester_active_series Number of currently active series per user. # TYPE cortex_ingester_active_series gauge cortex_ingester_active_series{user="test"} 1 + # HELP cortex_ingester_active_native_histogram_series Number of currently active native histogram series per user. + # TYPE cortex_ingester_active_native_histogram_series gauge + cortex_ingester_active_native_histogram_series{user="test"} 0 `, }, "ooo enabled, should succeed": { @@ -1491,7 +1515,7 @@ func TestIngester_Push(t *testing.T) { expectedIngested: []cortexpb.TimeSeries{ {Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 1, TimestampMs: 1575043969 - (60 * 1000)}, {Value: 2, TimestampMs: 1575043969}}}, }, - additionalMetrics: []string{"cortex_ingester_active_series"}, + additionalMetrics: []string{"cortex_ingester_active_series", "cortex_ingester_active_native_histogram_series"}, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. # TYPE cortex_ingester_ingested_samples_total counter @@ -1514,6 +1538,9 @@ func TestIngester_Push(t *testing.T) { # HELP cortex_ingester_active_series Number of currently active series per user. # TYPE cortex_ingester_active_series gauge cortex_ingester_active_series{user="test"} 1 + # HELP cortex_ingester_active_native_histogram_series Number of currently active native histogram series per user. 
+ # TYPE cortex_ingester_active_native_histogram_series gauge + cortex_ingester_active_native_histogram_series{user="test"} 0 `, }, "native histogram ooo enabled, should soft fail on sample too old": { @@ -1539,6 +1566,7 @@ func TestIngester_Push(t *testing.T) { additionalMetrics: []string{ "cortex_ingester_tsdb_head_samples_appended_total", "cortex_ingester_active_series", + "cortex_ingester_active_native_histogram_series", "cortex_discarded_samples_total", }, expectedMetrics: ` @@ -1576,6 +1604,9 @@ func TestIngester_Push(t *testing.T) { # HELP cortex_discarded_samples_total The total number of samples that were discarded. # TYPE cortex_discarded_samples_total counter cortex_discarded_samples_total{reason="sample-too-old",user="test"} 1 + # HELP cortex_ingester_active_native_histogram_series Number of currently active native histogram series per user. + # TYPE cortex_ingester_active_native_histogram_series gauge + cortex_ingester_active_native_histogram_series{user="test"} 1 `, }, "native histogram ooo enabled, should succeed": { @@ -1600,6 +1631,7 @@ func TestIngester_Push(t *testing.T) { additionalMetrics: []string{ "cortex_ingester_tsdb_head_samples_appended_total", "cortex_ingester_active_series", + "cortex_ingester_active_native_histogram_series", }, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion. @@ -1633,6 +1665,9 @@ func TestIngester_Push(t *testing.T) { # HELP cortex_ingester_active_series Number of currently active series per user. # TYPE cortex_ingester_active_series gauge cortex_ingester_active_series{user="test"} 1 + # HELP cortex_ingester_active_native_histogram_series Number of currently active native histogram series per user. + # TYPE cortex_ingester_active_native_histogram_series gauge + cortex_ingester_active_native_histogram_series{user="test"} 1 `, }, "should soft fail on two different sample values at the same timestamp": { @@ -1654,7 +1689,7 @@ func TestIngester_Push(t *testing.T) { expectedIngested: []cortexpb.TimeSeries{ {Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 2, TimestampMs: 1575043969}}}, }, - additionalMetrics: []string{"cortex_discarded_samples_total", "cortex_ingester_active_series"}, + additionalMetrics: []string{"cortex_discarded_samples_total", "cortex_ingester_active_series", "cortex_ingester_active_native_histogram_series"}, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. # TYPE cortex_ingester_ingested_samples_total counter @@ -1680,6 +1715,9 @@ func TestIngester_Push(t *testing.T) { # HELP cortex_ingester_active_series Number of currently active series per user. # TYPE cortex_ingester_active_series gauge cortex_ingester_active_series{user="test"} 1 + # HELP cortex_ingester_active_native_histogram_series Number of currently active native histogram series per user. + # TYPE cortex_ingester_active_native_histogram_series gauge + cortex_ingester_active_native_histogram_series{user="test"} 0 `, }, "should soft fail on exemplar with unknown series": { @@ -1714,6 +1752,7 @@ func TestIngester_Push(t *testing.T) { "cortex_ingester_tsdb_exemplar_last_exemplars_timestamp_seconds", "cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total", "cortex_ingester_active_series", + "cortex_ingester_active_native_histogram_series", }, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. 
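To make the expectations above easier to follow: the new cortex_ingester_active_native_histogram_series gauge is fed by a second counter kept next to the existing one in each activeSeriesStripe. It is incremented only when an entry is created for a native histogram sample, and both counters are rebuilt from the surviving entries on purge. What follows is a simplified single-map sketch of that bookkeeping, with the striping, locking, and atomic timestamps of the real implementation deliberately omitted; it is an illustration, not the Cortex code.

package main

// entry is a reduced version of activeSeriesEntry: just the last-seen
// timestamp and whether the series was created for a native histogram.
type entry struct {
	lastSeenNanos     int64
	isNativeHistogram bool
}

// activeSet collapses the striped, mutex-guarded maps of ActiveSeries into a
// single map so the counting logic can be read in isolation.
type activeSet struct {
	refs map[uint64]entry
}

// update records a series as seen at nowNanos, never moving time backwards.
func (a *activeSet) update(fp uint64, nowNanos int64, nativeHistogram bool) {
	e, ok := a.refs[fp]
	if !ok {
		a.refs[fp] = entry{lastSeenNanos: nowNanos, isNativeHistogram: nativeHistogram}
		return
	}
	if nowNanos > e.lastSeenNanos {
		e.lastSeenNanos = nowNanos
		a.refs[fp] = e // the native-histogram flag stays fixed at creation time
	}
}

// purge drops entries older than keepUntilNanos and recomputes both counters
// from the survivors, mirroring how activeSeriesStripe.purge rebuilds active
// and activeNativeHistogram together.
func (a *activeSet) purge(keepUntilNanos int64) (active, activeNH int) {
	for fp, e := range a.refs {
		if e.lastSeenNanos < keepUntilNanos {
			delete(a.refs, fp)
			continue
		}
		active++
		if e.isNativeHistogram {
			activeNH++
		}
	}
	return active, activeNH
}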
@@ -1737,6 +1776,9 @@ func TestIngester_Push(t *testing.T) { # HELP cortex_ingester_active_series Number of currently active series per user. # TYPE cortex_ingester_active_series gauge cortex_ingester_active_series{user="test"} 0 + # HELP cortex_ingester_active_native_histogram_series Number of currently active native histogram series per user. + # TYPE cortex_ingester_active_native_histogram_series gauge + cortex_ingester_active_native_histogram_series{user="test"} 0 # HELP cortex_ingester_tsdb_exemplar_exemplars_appended_total Total number of TSDB exemplars appended. # TYPE cortex_ingester_tsdb_exemplar_exemplars_appended_total counter @@ -1775,6 +1817,7 @@ func TestIngester_Push(t *testing.T) { additionalMetrics: []string{ "cortex_ingester_tsdb_head_samples_appended_total", "cortex_ingester_active_series", + "cortex_ingester_active_native_histogram_series", }, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion. @@ -1813,6 +1856,9 @@ func TestIngester_Push(t *testing.T) { # HELP cortex_ingester_active_series Number of currently active series per user. # TYPE cortex_ingester_active_series gauge cortex_ingester_active_series{user="test"} 1 + # HELP cortex_ingester_active_native_histogram_series Number of currently active native histogram series per user. + # TYPE cortex_ingester_active_native_histogram_series gauge + cortex_ingester_active_native_histogram_series{user="test"} 1 `, }, "should succeed when only float native histogram present if enabled": { @@ -1831,6 +1877,7 @@ func TestIngester_Push(t *testing.T) { additionalMetrics: []string{ "cortex_ingester_tsdb_head_samples_appended_total", "cortex_ingester_active_series", + "cortex_ingester_active_native_histogram_series", }, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. @@ -1869,64 +1916,9 @@ func TestIngester_Push(t *testing.T) { # HELP cortex_ingester_active_series Number of currently active series per user. # TYPE cortex_ingester_active_series gauge cortex_ingester_active_series{user="test"} 1 - `, - }, - "should fail to ingest histogram due to OOO native histogram. Sample and histogram has same timestamp but sample got ingested first": { - reqs: []*cortexpb.WriteRequest{ - cortexpb.ToWriteRequest( - []labels.Labels{metricLabels}, - []cortexpb.Sample{{Value: 2, TimestampMs: 11}}, - nil, - []cortexpb.Histogram{testHistogram}, - cortexpb.API), - }, - expectedErr: nil, - expectedIngested: []cortexpb.TimeSeries{ - {Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 2, TimestampMs: 11}}}, - }, - additionalMetrics: []string{ - "cortex_ingester_tsdb_head_samples_appended_total", - "cortex_ingester_tsdb_out_of_order_samples_total", - "cortex_ingester_active_series", - }, - expectedMetrics: ` - # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. - # TYPE cortex_ingester_ingested_samples_total counter - cortex_ingester_ingested_samples_total 1 - # HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion. - # TYPE cortex_ingester_ingested_samples_failures_total counter - cortex_ingester_ingested_samples_failures_total 0 - # HELP cortex_ingester_ingested_native_histograms_total The total number of native histograms ingested. 
- # TYPE cortex_ingester_ingested_native_histograms_total counter - cortex_ingester_ingested_native_histograms_total 1 - # HELP cortex_ingester_ingested_native_histograms_failures_total The total number of native histograms that errored on ingestion. - # TYPE cortex_ingester_ingested_native_histograms_failures_total counter - cortex_ingester_ingested_native_histograms_failures_total 0 - # HELP cortex_ingester_memory_users The current number of users in memory. - # TYPE cortex_ingester_memory_users gauge - cortex_ingester_memory_users 1 - # HELP cortex_ingester_tsdb_head_samples_appended_total Total number of appended samples. - # TYPE cortex_ingester_tsdb_head_samples_appended_total counter - cortex_ingester_tsdb_head_samples_appended_total{type="float",user="test"} 1 - cortex_ingester_tsdb_head_samples_appended_total{type="histogram",user="test"} 0 - # HELP cortex_ingester_tsdb_out_of_order_samples_total Total number of out of order samples ingestion failed attempts due to out of order being disabled. - # TYPE cortex_ingester_tsdb_out_of_order_samples_total counter - cortex_ingester_tsdb_out_of_order_samples_total{type="float",user="test"} 0 - cortex_ingester_tsdb_out_of_order_samples_total{type="histogram",user="test"} 1 - # HELP cortex_ingester_memory_series The current number of series in memory. - # TYPE cortex_ingester_memory_series gauge - cortex_ingester_memory_series 1 - # HELP cortex_ingester_memory_series_created_total The total number of series that were created per user. - # TYPE cortex_ingester_memory_series_created_total counter - cortex_ingester_memory_series_created_total{user="test"} 1 - # HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user. - # TYPE cortex_ingester_memory_series_removed_total counter - cortex_ingester_memory_series_removed_total{user="test"} 0 - # HELP cortex_discarded_samples_total The total number of samples that were discarded. - # TYPE cortex_discarded_samples_total counter - # HELP cortex_ingester_active_series Number of currently active series per user. - # TYPE cortex_ingester_active_series gauge - cortex_ingester_active_series{user="test"} 1 + # HELP cortex_ingester_active_native_histogram_series Number of currently active native histogram series per user. + # TYPE cortex_ingester_active_native_histogram_series gauge + cortex_ingester_active_native_histogram_series{user="test"} 1 `, }, } @@ -2008,7 +2000,7 @@ func TestIngester_Push(t *testing.T) { assert.Equal(t, testData.expectedExemplarsIngested, exemplarRes.Timeseries) // Read back metadata to see what has been really ingested. 
- mres, err := i.MetricsMetadata(ctx, &client.MetricsMetadataRequest{}) + mres, err := i.MetricsMetadata(ctx, &client.MetricsMetadataRequest{Limit: -1, LimitPerMetric: -1, Metric: ""}) require.NoError(t, err) require.NotNil(t, mres) @@ -3058,6 +3050,49 @@ func TestIngester_Query_ShouldNotCreateTSDBIfDoesNotExists(t *testing.T) { assert.False(t, tsdbCreated) } +func Test_Ingester_Query_ResourceThresholdBreached(t *testing.T) { + series := []struct { + lbls labels.Labels + value float64 + timestamp int64 + }{ + {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "200"}}, 1, 100000}, + } + + i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry()) + require.NoError(t, err) + require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) + defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck + + limits := map[resource.Type]float64{ + resource.CPU: 0.5, + resource.Heap: 0.5, + } + i.resourceBasedLimiter, err = limiter.NewResourceBasedLimiter(&mockResourceMonitor{cpu: 0.4, heap: 0.6}, limits, nil, "ingester") + require.NoError(t, err) + + // Wait until it's ACTIVE + test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + return i.lifecycler.GetState() + }) + + // Push series + ctx := user.InjectOrgID(context.Background(), "test") + + for _, series := range series { + req, _ := mockWriteRequest(t, series.lbls, series.value, series.timestamp) + _, err := i.Push(ctx, req) + require.NoError(t, err) + } + + rreq := &client.QueryRequest{} + s := &mockQueryStreamServer{ctx: ctx} + err = i.QueryStream(rreq, s) + require.Error(t, err) + exhaustedErr := limiter.ResourceLimitReachedError{} + require.ErrorContains(t, err, exhaustedErr.Error()) +} + func TestIngester_LabelValues_ShouldNotCreateTSDBIfDoesNotExists(t *testing.T) { i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry()) require.NoError(t, err) @@ -3933,7 +3968,7 @@ func prepareIngesterWithBlocksStorageAndLimits(t testing.TB, ingesterCfg Config, ingesterCfg.BlocksStorageConfig.Bucket.Filesystem.Directory = bucketDir ingesterCfg.BlocksStorageConfig.TSDB.EnableNativeHistograms = nativeHistograms - ingester, err := New(ingesterCfg, overrides, registerer, log.NewNopLogger()) + ingester, err := New(ingesterCfg, overrides, registerer, log.NewNopLogger(), nil) if err != nil { return nil, err } @@ -3941,6 +3976,19 @@ func prepareIngesterWithBlocksStorageAndLimits(t testing.TB, ingesterCfg Config, return ingester, nil } +type mockResourceMonitor struct { + cpu float64 + heap float64 +} + +func (m *mockResourceMonitor) GetCPUUtilization() float64 { + return m.cpu +} + +func (m *mockResourceMonitor) GetHeapUtilization() float64 { + return m.heap +} + func TestIngester_OpenExistingTSDBOnStartup(t *testing.T) { t.Parallel() @@ -4096,7 +4144,7 @@ func TestIngester_OpenExistingTSDBOnStartup(t *testing.T) { // setup the tsdbs dir testData.setup(t, tempDir) - ingester, err := New(ingesterCfg, overrides, prometheus.NewRegistry(), log.NewNopLogger()) + ingester, err := New(ingesterCfg, overrides, prometheus.NewRegistry(), log.NewNopLogger(), nil) require.NoError(t, err) startErr := services.StartAndAwaitRunning(context.Background(), ingester) @@ -5305,7 +5353,7 @@ func TestHeadCompactionOnStartup(t *testing.T) { ingesterCfg.BlocksStorageConfig.Bucket.S3.Endpoint = "localhost" ingesterCfg.BlocksStorageConfig.TSDB.Retention = 2 * 24 * time.Hour // Make sure that 
no newly created blocks are deleted.
 
-	ingester, err := New(ingesterCfg, overrides, prometheus.NewRegistry(), log.NewNopLogger())
+	ingester, err := New(ingesterCfg, overrides, prometheus.NewRegistry(), log.NewNopLogger(), nil)
 	require.NoError(t, err)
 	require.NoError(t, services.StartAndAwaitRunning(context.Background(), ingester))
diff --git a/pkg/ingester/instance_limits.go b/pkg/ingester/instance_limits.go
index d3b4671152b..334d1250e60 100644
--- a/pkg/ingester/instance_limits.go
+++ b/pkg/ingester/instance_limits.go
@@ -1,6 +1,13 @@
 package ingester
 
-import "github.com/pkg/errors"
+import (
+	"flag"
+
+	"github.com/pkg/errors"
+
+	"github.com/cortexproject/cortex/pkg/configs"
+	"github.com/cortexproject/cortex/pkg/util/flagext"
+)
 
 var (
 	// We don't include values in the message to avoid leaking Cortex cluster configuration to users.
@@ -11,9 +18,10 @@ var (
 	errTooManyInflightQueryRequests = errors.New("cannot push: too many inflight query requests in ingester")
 )
 
-// InstanceLimits describes limits used by ingester. Reaching any of these will result in Push method to return
-// (internal) error.
+// InstanceLimits describes limits used by the ingester. Reaching any of these will result in an error response to the call.
 type InstanceLimits struct {
+	configs.InstanceLimits `yaml:",inline"`
+
 	MaxIngestionRate         float64 `yaml:"max_ingestion_rate"`
 	MaxInMemoryTenants       int64   `yaml:"max_tenants"`
 	MaxInMemorySeries        int64   `yaml:"max_series"`
@@ -24,6 +32,19 @@ type InstanceLimits struct {
 // Sets default limit values for unmarshalling.
 var defaultInstanceLimits *InstanceLimits
 
+func (cfg *InstanceLimits) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) {
+	f.Float64Var(&cfg.MaxIngestionRate, prefix+"instance-limits.max-ingestion-rate", 0, "Max ingestion rate (samples/sec) that ingester will accept. This limit is per-ingester, not per-tenant. Additional push requests will be rejected. Current ingestion rate is computed as exponentially weighted moving average, updated every second. This limit only works when using blocks engine. 0 = unlimited.")
+	f.Int64Var(&cfg.MaxInMemoryTenants, prefix+"instance-limits.max-tenants", 0, "Max users that this ingester can hold. Requests from additional users will be rejected. This limit only works when using blocks engine. 0 = unlimited.")
+	f.Int64Var(&cfg.MaxInMemorySeries, prefix+"instance-limits.max-series", 0, "Max series that this ingester can hold (across all tenants). Requests to create additional series will be rejected. This limit only works when using blocks engine. 0 = unlimited.")
+	f.Int64Var(&cfg.MaxInflightPushRequests, prefix+"instance-limits.max-inflight-push-requests", 0, "Max inflight push requests that this ingester can handle (across all tenants). Additional requests will be rejected. 0 = unlimited.")
+	f.Int64Var(&cfg.MaxInflightQueryRequests, prefix+"instance-limits.max-inflight-query-requests", 0, "Max inflight query requests that this ingester can handle (across all tenants). Additional requests will be rejected. 0 = unlimited.")
+	cfg.InstanceLimits.RegisterFlagsWithPrefix(f, prefix)
+}
+
+func (cfg *InstanceLimits) Validate(monitoredResources flagext.StringSliceCSV) error {
+	return cfg.InstanceLimits.Validate(monitoredResources)
+}
+
 // UnmarshalYAML implements the yaml.Unmarshaler interface. 
If give func (l *InstanceLimits) UnmarshalYAML(unmarshal func(interface{}) error) error { if defaultInstanceLimits != nil { diff --git a/pkg/ingester/metrics.go b/pkg/ingester/metrics.go index 6cfe49dc1a2..bcb8148149d 100644 --- a/pkg/ingester/metrics.go +++ b/pkg/ingester/metrics.go @@ -44,9 +44,10 @@ type ingesterMetrics struct { memSeriesRemovedTotal *prometheus.CounterVec memMetadataRemovedTotal *prometheus.CounterVec - activeSeriesPerUser *prometheus.GaugeVec - limitsPerLabelSet *prometheus.GaugeVec - usagePerLabelSet *prometheus.GaugeVec + activeSeriesPerUser *prometheus.GaugeVec + activeNHSeriesPerUser *prometheus.GaugeVec + limitsPerLabelSet *prometheus.GaugeVec + usagePerLabelSet *prometheus.GaugeVec // Global limit metrics maxUsersGauge prometheus.GaugeFunc @@ -249,6 +250,12 @@ func newIngesterMetrics(r prometheus.Registerer, Name: "cortex_ingester_active_series", Help: "Number of currently active series per user.", }, []string{"user"}), + + // Not registered automatically, but only if activeSeriesEnabled is true. + activeNHSeriesPerUser: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "cortex_ingester_active_native_histogram_series", + Help: "Number of currently active native histogram series per user.", + }, []string{"user"}), } if postingsCacheEnabled && r != nil { @@ -257,6 +264,7 @@ func newIngesterMetrics(r prometheus.Registerer, if activeSeriesEnabled && r != nil { r.MustRegister(m.activeSeriesPerUser) + r.MustRegister(m.activeNHSeriesPerUser) } if createMetricsConflictingWithTSDB { @@ -278,6 +286,7 @@ func (m *ingesterMetrics) deletePerUserMetrics(userID string) { m.memMetadataCreatedTotal.DeleteLabelValues(userID) m.memMetadataRemovedTotal.DeleteLabelValues(userID) m.activeSeriesPerUser.DeleteLabelValues(userID) + m.activeNHSeriesPerUser.DeleteLabelValues(userID) m.usagePerLabelSet.DeletePartialMatch(prometheus.Labels{"user": userID}) m.limitsPerLabelSet.DeletePartialMatch(prometheus.Labels{"user": userID}) diff --git a/pkg/ingester/user_metrics_metadata.go b/pkg/ingester/user_metrics_metadata.go index dcb5fd6bbf4..8f451c884b4 100644 --- a/pkg/ingester/user_metrics_metadata.go +++ b/pkg/ingester/user_metrics_metadata.go @@ -7,6 +7,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/cortexproject/cortex/pkg/cortexpb" + "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -84,18 +85,46 @@ func (mm *userMetricsMetadata) purge(deadline time.Time) { mm.metrics.memMetadataRemovedTotal.WithLabelValues(mm.userID).Add(float64(deleted)) } -func (mm *userMetricsMetadata) toClientMetadata() []*cortexpb.MetricMetadata { +func (mm *userMetricsMetadata) toClientMetadata(req *client.MetricsMetadataRequest) []*cortexpb.MetricMetadata { mm.mtx.RLock() defer mm.mtx.RUnlock() r := make([]*cortexpb.MetricMetadata, 0, len(mm.metricToMetadata)) + if req.Limit == 0 { + return r + } + + if req.Metric != "" { + metadataSet, ok := mm.metricToMetadata[req.Metric] + if !ok { + return r + } + + metadataSet.add(req.LimitPerMetric, &r) + return r + } + + var metrics int64 for _, set := range mm.metricToMetadata { - for m := range set { - r = append(r, &m) + if req.Limit > 0 && metrics >= req.Limit { + break } + set.add(req.LimitPerMetric, &r) + metrics++ } return r } +func (mns metricMetadataSet) add(limitPerMetric int64, r *[]*cortexpb.MetricMetadata) { + var metrics int64 + for m := range mns { + if limitPerMetric > 0 && metrics >= limitPerMetric { + return + } + *r = append(*r, &m) + metrics++ + } +} + type 
metricMetadataSet map[cortexpb.MetricMetadata]time.Time
 
 // If deadline is zero time, all metrics are purged.
diff --git a/pkg/ingester/user_metrics_metadata_test.go b/pkg/ingester/user_metrics_metadata_test.go
new file mode 100644
index 00000000000..2a28601ced3
--- /dev/null
+++ b/pkg/ingester/user_metrics_metadata_test.go
@@ -0,0 +1,135 @@
+package ingester
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/stretchr/testify/require"
+
+	"github.com/cortexproject/cortex/pkg/cortexpb"
+	"github.com/cortexproject/cortex/pkg/ingester/client"
+	"github.com/cortexproject/cortex/pkg/util"
+	util_math "github.com/cortexproject/cortex/pkg/util/math"
+	"github.com/cortexproject/cortex/pkg/util/validation"
+)
+
+const (
+	defaultLimit          = -1
+	defaultLimitPerMetric = -1
+)
+
+func Test_UserMetricsMetadata(t *testing.T) {
+	userId := "user-1"
+
+	reg := prometheus.NewPedanticRegistry()
+	ingestionRate := util_math.NewEWMARate(0.2, instanceIngestionRateTickInterval)
+	inflightPushRequests := util_math.MaxTracker{}
+	maxInflightQueryRequests := util_math.MaxTracker{}
+
+	m := newIngesterMetrics(reg,
+		false,
+		false,
+		func() *InstanceLimits {
+			return &InstanceLimits{}
+		},
+		ingestionRate,
+		&inflightPushRequests,
+		&maxInflightQueryRequests,
+		false)
+
+	limits := validation.Limits{}
+	overrides, err := validation.NewOverrides(limits, nil)
+	require.NoError(t, err)
+	limiter := NewLimiter(overrides, nil, util.ShardingStrategyDefault, true, 1, false, "")
+
+	userMetricsMetadata := newMetadataMap(limiter, m, validation.NewValidateMetrics(reg), userId)
+
+	addMetricMetadata := func(name string, i int) {
+		metadata := &cortexpb.MetricMetadata{
+			MetricFamilyName: fmt.Sprintf("%s_%d", name, i),
+			Type:             cortexpb.GAUGE,
+			Help:             fmt.Sprintf("a help for %s", name),
+			Unit:             fmt.Sprintf("a unit for %s", name),
+		}
+
+		err := userMetricsMetadata.add(name, metadata)
+		require.NoError(t, err)
+	}
+
+	metadataNumPerMetric := 3
+	for _, m := range []string{"metric1", "metric2"} {
+		for i := range metadataNumPerMetric {
+			addMetricMetadata(m, i)
+		}
+	}
+
+	tests := []struct {
+		description    string
+		limit          int64
+		limitPerMetric int64
+		metric         string
+		expectedLength int
+	}{
+		{
+			description:    "limit: 1",
+			limit:          1,
+			limitPerMetric: defaultLimitPerMetric,
+			expectedLength: 3,
+		},
+		{
+			description:    "limit: 0",
+			limit:          0,
+			limitPerMetric: defaultLimitPerMetric,
+			expectedLength: 0,
+		},
+		{
+			description:    "limit_per_metric: 2",
+			limit:          defaultLimit,
+			limitPerMetric: 2,
+			expectedLength: 4,
+		},
+		{
+			description:    "limit: 1, limit_per_metric: 2",
+			limit:          1,
+			limitPerMetric: 2,
+			expectedLength: 2,
+		},
+		{
+			description:    "limit: 1, limit_per_metric: 0 (should be ignored)",
+			limit:          1,
+			limitPerMetric: 0,
+			expectedLength: 3,
+		},
+		{
+			description:    "metric: metric1",
+			limit:          defaultLimit,
+			limitPerMetric: defaultLimitPerMetric,
+			metric:         "metric1",
+			expectedLength: 3,
+		},
+		{
+			description:    "metric: metric1, limit_per_metric: 2",
+			limit:          defaultLimit,
+			limitPerMetric: 2,
+			metric:         "metric1",
+			expectedLength: 2,
+		},
+		{
+			description:    "nonexistent metric",
+			limit:          1,
+			limitPerMetric: defaultLimitPerMetric,
+			metric:         "dummy",
+			expectedLength: 0,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.description, func(t *testing.T) {
+			req := &client.MetricsMetadataRequest{Limit: test.limit, LimitPerMetric: test.limitPerMetric, Metric: test.metric}
+
+			r := userMetricsMetadata.toClientMetadata(req)
+			require.Equal(t, test.expectedLength, len(r))
+		})
+	}
+}
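To summarize the semantics the test table above pins down: limit caps how many metric families toClientMetadata returns, limit_per_metric caps how many metadata entries each family contributes (zero or negative values mean unlimited), limit == 0 short-circuits to an empty result, and metric restricts the response to a single family. A standalone sketch of that truncation logic (the truncate helper and metadata type here are hypothetical stand-ins, not the Cortex types):

package main

import "fmt"

type metadata struct{ Help string }

// truncate mirrors the limit semantics exercised above: limit == 0 returns
// nothing, a non-positive limit or limitPerMetric otherwise means "unlimited",
// and limitPerMetric > 0 caps how many entries each metric family contributes.
func truncate(families map[string][]metadata, limit, limitPerMetric int64) []metadata {
	out := []metadata{}
	if limit == 0 {
		return out
	}
	var seen int64
	for _, entries := range families {
		if limit > 0 && seen >= limit {
			break
		}
		for i, m := range entries {
			if limitPerMetric > 0 && int64(i) >= limitPerMetric {
				break
			}
			out = append(out, m)
		}
		seen++
	}
	return out
}

func main() {
	families := map[string][]metadata{
		"metric1": {{"h1"}, {"h2"}, {"h3"}},
		"metric2": {{"h4"}, {"h5"}, {"h6"}},
	}
	fmt.Println(len(truncate(families, 1, -1)))  // 3: one family, all entries
	fmt.Println(len(truncate(families, -1, 2))) // 4: both families, two entries each
	fmt.Println(len(truncate(families, 0, -1))) // 0: limit 0 short-circuits
}

The same caps are enforced again in the querier's HTTP handler further down, because with tenant federation the mergeMetadataQuerier fans a request out to several per-tenant queries and merges the results, so an ingester-side cap alone cannot bound the final payload.
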
diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index c7953482451..a5647e55451 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -1203,6 +1203,12 @@ func countSamplesAndChunks(series ...*storepb.Series) (samplesCount, chunksCount // only retry connection issues func isRetryableError(err error) bool { + // retry upon resource exhaustion error from resource monitor + var resourceExhaustedErr *limiter.ResourceLimitReachedError + if errors.As(err, &resourceExhaustedErr) { + return true + } + switch status.Code(err) { case codes.Unavailable: return true diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index 162e606d4ff..3ca5c03fee2 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -1518,6 +1518,35 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { limits: &blocksStoreLimitsMock{}, expectedErr: errors.Wrapf(status.Error(codes.ResourceExhausted, "some other resource"), "failed to fetch series from 1.1.1.1"), }, + "multiple store-gateways has the block, but one of them had resource exhausted error from resource monitor": { + finderResult: bucketindex.Blocks{ + &bucketindex.Block{ID: block1}, + }, + storeSetResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{ + remoteAddr: "1.1.1.1", + mockedSeriesErr: &limiter.ResourceLimitReachedError{}, + }: {block1}, + }, + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + mockHintsResponse(block1), + }}: {block1}, + }, + }, + limits: &blocksStoreLimitsMock{}, + queryLimiter: noOpQueryLimiter, + expectedSeries: []seriesResult{ + { + lbls: labels.New(metricNameLabel, series1Label), + values: []valueResult{ + {t: minT, v: 2}, + }, + }, + }, + }, } for testName, testData := range tests { diff --git a/pkg/querier/distributor_queryable.go b/pkg/querier/distributor_queryable.go index 46965c8eee3..dffe8ae3002 100644 --- a/pkg/querier/distributor_queryable.go +++ b/pkg/querier/distributor_queryable.go @@ -35,7 +35,7 @@ type Distributor interface { LabelNamesStream(context.Context, model.Time, model.Time, *storage.LabelHints, bool, ...*labels.Matcher) ([]string, error) MetricsForLabelMatchers(ctx context.Context, from, through model.Time, hint *storage.SelectHints, partialDataEnabled bool, matchers ...*labels.Matcher) ([]model.Metric, error) MetricsForLabelMatchersStream(ctx context.Context, from, through model.Time, hint *storage.SelectHints, partialDataEnabled bool, matchers ...*labels.Matcher) ([]model.Metric, error) - MetricsMetadata(ctx context.Context) ([]scrape.MetricMetadata, error) + MetricsMetadata(ctx context.Context, req *client.MetricsMetadataRequest) ([]scrape.MetricMetadata, error) } func newDistributorQueryable(distributor Distributor, streamingMetdata bool, labelNamesWithMatchers bool, iteratorFn chunkIteratorFunc, queryIngestersWithin time.Duration, isPartialDataEnabled partialdata.IsCfgEnabledFunc) QueryableWithFilter { diff --git a/pkg/querier/metadata_handler.go b/pkg/querier/metadata_handler.go index e185cdc6084..9eeeb0b1ad7 100644 --- a/pkg/querier/metadata_handler.go +++ b/pkg/querier/metadata_handler.go @@ -2,15 +2,22 @@ package querier import ( "context" + "fmt" "net/http" + "strconv" 
"github.com/prometheus/prometheus/scrape" + "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/util" ) +const ( + defaultLimit = -1 +) + type MetadataQuerier interface { - MetricsMetadata(ctx context.Context) ([]scrape.MetricMetadata, error) + MetricsMetadata(ctx context.Context, req *client.MetricsMetadataRequest) ([]scrape.MetricMetadata, error) } type metricMetadata struct { @@ -24,20 +31,41 @@ const ( statusError = "error" ) -type metadataResult struct { +type metadataSuccessResult struct { Status string `json:"status"` - Data map[string][]metricMetadata `json:"data,omitempty"` - Error string `json:"error,omitempty"` + Data map[string][]metricMetadata `json:"data"` +} + +type metadataErrorResult struct { + Status string `json:"status"` + Error string `json:"error"` } // MetadataHandler returns metric metadata held by Cortex for a given tenant. // It is kept and returned as a set. func MetadataHandler(m MetadataQuerier) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - resp, err := m.MetricsMetadata(r.Context()) + limit, err := validateLimits("limit", w, r) + if err != nil { + return + } + + limitPerMetric, err := validateLimits("limit_per_metric", w, r) + if err != nil { + return + } + + metric := r.FormValue("metric") + req := &client.MetricsMetadataRequest{ + Limit: int64(limit), + LimitPerMetric: int64(limitPerMetric), + Metric: metric, + } + + resp, err := m.MetricsMetadata(r.Context(), req) if err != nil { w.WriteHeader(http.StatusBadRequest) - util.WriteJSONResponse(w, metadataResult{Status: statusError, Error: err.Error()}) + util.WriteJSONResponse(w, metadataErrorResult{Status: statusError, Error: err.Error()}) return } @@ -45,7 +73,17 @@ func MetadataHandler(m MetadataQuerier) http.Handler { metrics := map[string][]metricMetadata{} for _, m := range resp { ms, ok := metrics[m.MetricFamily] + // We have to check limit both ingester and here since the ingester only check + // for one user, it cannot handle the case when the mergeMetadataQuerier + // (tenant-federation) is used. + if limitPerMetric > 0 && len(ms) >= limitPerMetric { + continue + } + if !ok { + if limit >= 0 && len(metrics) >= limit { + break + } // Most metrics will only hold 1 copy of the same metadata. 
ms = make([]metricMetadata, 0, 1) metrics[m.MetricFamily] = ms @@ -53,6 +91,19 @@ func MetadataHandler(m MetadataQuerier) http.Handler { metrics[m.MetricFamily] = append(ms, metricMetadata{Type: string(m.Type), Help: m.Help, Unit: m.Unit}) } - util.WriteJSONResponse(w, metadataResult{Status: statusSuccess, Data: metrics}) + util.WriteJSONResponse(w, metadataSuccessResult{Status: statusSuccess, Data: metrics}) }) } + +func validateLimits(name string, w http.ResponseWriter, r *http.Request) (int, error) { + v := defaultLimit + if s := r.FormValue(name); s != "" { + var err error + if v, err = strconv.Atoi(s); err != nil { + w.WriteHeader(http.StatusBadRequest) + util.WriteJSONResponse(w, metadataErrorResult{Status: statusError, Error: fmt.Sprintf("%s must be a number", name)}) + return 0, err + } + } + return v, nil +} diff --git a/pkg/querier/metadata_handler_test.go b/pkg/querier/metadata_handler_test.go index c47daa1e014..a2c35f2fe98 100644 --- a/pkg/querier/metadata_handler_test.go +++ b/pkg/querier/metadata_handler_test.go @@ -5,6 +5,7 @@ import ( "io" "net/http" "net/http/httptest" + "net/url" "testing" "github.com/prometheus/prometheus/scrape" @@ -16,47 +17,193 @@ func TestMetadataHandler_Success(t *testing.T) { t.Parallel() d := &MockDistributor{} - d.On("MetricsMetadata", mock.Anything).Return( + d.On("MetricsMetadata", mock.Anything, mock.Anything).Return( []scrape.MetricMetadata{ {MetricFamily: "alertmanager_dispatcher_aggregation_groups", Help: "Number of active aggregation groups", Type: "gauge", Unit: ""}, + {MetricFamily: "go_threads", Help: "Number of OS threads created", Type: "gauge", Unit: ""}, + {MetricFamily: "go_threads", Help: "Number of OS threads that were created", Type: "gauge", Unit: ""}, }, nil) - handler := MetadataHandler(d) - - request, err := http.NewRequest("GET", "/metadata", nil) - require.NoError(t, err) - - recorder := httptest.NewRecorder() - handler.ServeHTTP(recorder, request) + fullResponseJson := ` + { + "status": "success", + "data": { + "alertmanager_dispatcher_aggregation_groups": [ + { + "help": "Number of active aggregation groups", + "type": "gauge", + "unit": "" + } + ], + "go_threads": [ + { + "help": "Number of OS threads created", + "type": "gauge", + "unit": "" + }, + { + "help": "Number of OS threads that were created", + "type": "gauge", + "unit": "" + } + ] + } + } + ` - require.Equal(t, http.StatusOK, recorder.Result().StatusCode) - responseBody, err := io.ReadAll(recorder.Result().Body) - require.NoError(t, err) + emptyDataResponseJson := ` + { + "status": "success", + "data": {} + } + ` - expectedJSON := ` - { - "status": "success", - "data": { - "alertmanager_dispatcher_aggregation_groups": [ + tests := []struct { + description string + queryParams url.Values + expectedCode int + expectedJson string + }{ + { + description: "no params", + queryParams: url.Values{}, + expectedCode: http.StatusOK, + expectedJson: fullResponseJson, + }, + { + description: "limit: -1", + queryParams: url.Values{ + "limit": []string{"-1"}, + }, + expectedCode: http.StatusOK, + expectedJson: fullResponseJson, + }, + { + description: "limit: 0", + queryParams: url.Values{ + "limit": []string{"0"}, + }, + expectedCode: http.StatusOK, + expectedJson: emptyDataResponseJson, + }, + { + description: "limit: 1", + queryParams: url.Values{ + "limit": []string{"1"}, + }, + expectedCode: http.StatusOK, + expectedJson: ` { - "help": "Number of active aggregation groups", - "type": "gauge", - "unit": "" + "status": "success", + "data": { + 
"alertmanager_dispatcher_aggregation_groups": [ + { + "help": "Number of active aggregation groups", + "type": "gauge", + "unit": "" + } + ] + } } - ] - } + `, + }, + { + description: "limit: invalid", + queryParams: url.Values{ + "limit": []string{"aaa"}, + }, + expectedCode: http.StatusBadRequest, + expectedJson: ` + { + "status": "error", + "error": "limit must be a number" + } + `, + }, + { + description: "limit_per_metric: -1", + queryParams: url.Values{ + "limit_per_metric": []string{"-1"}, + }, + expectedCode: http.StatusOK, + expectedJson: fullResponseJson, + }, + { + description: "limit_per_metric: 0, should be ignored", + queryParams: url.Values{ + "limit_per_metric": []string{"0"}, + }, + expectedCode: http.StatusOK, + expectedJson: fullResponseJson, + }, + { + description: "limit_per_metric: 1", + queryParams: url.Values{ + "limit_per_metric": []string{"1"}, + }, + expectedCode: http.StatusOK, + expectedJson: ` + { + "status": "success", + "data": { + "alertmanager_dispatcher_aggregation_groups": [ + { + "help": "Number of active aggregation groups", + "type": "gauge", + "unit": "" + } + ], + "go_threads": [ + { + "help": "Number of OS threads created", + "type": "gauge", + "unit": "" + } + ] + } + } + `, + }, + { + description: "limit_per_metric: invalid", + queryParams: url.Values{ + "limit_per_metric": []string{"aaa"}, + }, + expectedCode: http.StatusBadRequest, + expectedJson: ` + { + "status": "error", + "error": "limit_per_metric must be a number" + } + `, + }, } - ` - require.JSONEq(t, expectedJSON, string(responseBody)) + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + handler := MetadataHandler(d) + + request, err := http.NewRequest("GET", "/metadata", nil) + request.URL.RawQuery = test.queryParams.Encode() + require.NoError(t, err) + + recorder := httptest.NewRecorder() + handler.ServeHTTP(recorder, request) + + require.Equal(t, test.expectedCode, recorder.Result().StatusCode) + responseBody, err := io.ReadAll(recorder.Result().Body) + require.NoError(t, err) + require.JSONEq(t, test.expectedJson, string(responseBody)) + }) + } } func TestMetadataHandler_Error(t *testing.T) { t.Parallel() d := &MockDistributor{} - d.On("MetricsMetadata", mock.Anything).Return([]scrape.MetricMetadata{}, fmt.Errorf("no user id")) + d.On("MetricsMetadata", mock.Anything, mock.Anything).Return([]scrape.MetricMetadata{}, fmt.Errorf("no user id")) handler := MetadataHandler(d) diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index c87bfce0e68..d2865408abe 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -1392,7 +1392,7 @@ func (m *errDistributor) MetricsForLabelMatchersStream(ctx context.Context, from return nil, errDistributorError } -func (m *errDistributor) MetricsMetadata(ctx context.Context) ([]scrape.MetricMetadata, error) { +func (m *errDistributor) MetricsMetadata(ctx context.Context, request *client.MetricsMetadataRequest) ([]scrape.MetricMetadata, error) { return nil, errDistributorError } @@ -1448,7 +1448,7 @@ func (d *emptyDistributor) MetricsForLabelMatchersStream(ctx context.Context, fr return nil, nil } -func (d *emptyDistributor) MetricsMetadata(ctx context.Context) ([]scrape.MetricMetadata, error) { +func (d *emptyDistributor) MetricsMetadata(ctx context.Context, request *client.MetricsMetadataRequest) ([]scrape.MetricMetadata, error) { return nil, nil } diff --git a/pkg/querier/tenantfederation/metadata_merge_querier.go b/pkg/querier/tenantfederation/metadata_merge_querier.go index 
4a51a19d653..611bfbe1f55 100644 --- a/pkg/querier/tenantfederation/metadata_merge_querier.go +++ b/pkg/querier/tenantfederation/metadata_merge_querier.go @@ -10,6 +10,7 @@ import ( "github.com/prometheus/prometheus/scrape" "github.com/weaveworks/common/user" + "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/querier" "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util/concurrency" @@ -45,7 +46,7 @@ type metadataSelectJob struct { } // MetricsMetadata returns aggregated metadata for multiple tenants -func (m *mergeMetadataQuerier) MetricsMetadata(ctx context.Context) ([]scrape.MetricMetadata, error) { +func (m *mergeMetadataQuerier) MetricsMetadata(ctx context.Context, req *client.MetricsMetadataRequest) ([]scrape.MetricMetadata, error) { log, ctx := spanlogger.New(ctx, "mergeMetadataQuerier.MetricsMetadata") defer log.Span.Finish() @@ -57,7 +58,7 @@ func (m *mergeMetadataQuerier) MetricsMetadata(ctx context.Context) ([]scrape.Me m.tenantsPerMetadataQuery.Observe(float64(len(tenantIds))) if len(tenantIds) == 1 { - return m.upstream.MetricsMetadata(ctx) + return m.upstream.MetricsMetadata(ctx, req) } jobs := make([]interface{}, len(tenantIds)) @@ -79,7 +80,7 @@ func (m *mergeMetadataQuerier) MetricsMetadata(ctx context.Context) ([]scrape.Me return fmt.Errorf("unexpected type %T", jobIntf) } - res, err := job.querier.MetricsMetadata(user.InjectOrgID(ctx, job.id)) + res, err := job.querier.MetricsMetadata(user.InjectOrgID(ctx, job.id), req) if err != nil { return errors.Wrapf(err, "error exemplars querying %s %s", job.id, err) } diff --git a/pkg/querier/tenantfederation/metadata_merge_querier_test.go b/pkg/querier/tenantfederation/metadata_merge_querier_test.go index 58e5f955ba5..a9a93147338 100644 --- a/pkg/querier/tenantfederation/metadata_merge_querier_test.go +++ b/pkg/querier/tenantfederation/metadata_merge_querier_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/weaveworks/common/user" + "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/tenant" ) @@ -51,7 +52,7 @@ type mockMetadataQuerier struct { tenantIdToMetadata map[string][]scrape.MetricMetadata } -func (m *mockMetadataQuerier) MetricsMetadata(ctx context.Context) ([]scrape.MetricMetadata, error) { +func (m *mockMetadataQuerier) MetricsMetadata(ctx context.Context, _ *client.MetricsMetadataRequest) ([]scrape.MetricMetadata, error) { // Due to lint check for `ensure the query path is supporting multiple tenants` ids, err := tenant.TenantIDs(ctx) if err != nil { @@ -137,7 +138,7 @@ func Test_mergeMetadataQuerier_MetricsMetadata(t *testing.T) { } mergeMetadataQuerier := NewMetadataQuerier(&upstream, defaultMaxConcurrency, reg) - metadata, err := mergeMetadataQuerier.MetricsMetadata(user.InjectOrgID(context.Background(), test.orgId)) + metadata, err := mergeMetadataQuerier.MetricsMetadata(user.InjectOrgID(context.Background(), test.orgId), &client.MetricsMetadataRequest{Limit: -1, LimitPerMetric: -1, Metric: ""}) require.NoError(t, err) require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(test.expectedMetrics), "cortex_querier_federated_tenants_per_metadata_query")) require.Equal(t, test.expectedResults, metadata) diff --git a/pkg/querier/testutils.go b/pkg/querier/testutils.go index 0ee4a414640..37b1bd2b179 100644 --- a/pkg/querier/testutils.go +++ b/pkg/querier/testutils.go @@ -58,7 +58,7 @@ func (m *MockDistributor) MetricsForLabelMatchersStream(ctx context.Context, 
fro return args.Get(0).([]model.Metric), args.Error(1) } -func (m *MockDistributor) MetricsMetadata(ctx context.Context) ([]scrape.MetricMetadata, error) { +func (m *MockDistributor) MetricsMetadata(ctx context.Context, request *client.MetricsMetadataRequest) ([]scrape.MetricMetadata, error) { args := m.Called(ctx) return args.Get(0).([]scrape.MetricMetadata), args.Error(1) } diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index eb34ee02e3a..80ce675878c 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -155,7 +155,7 @@ func (t *PusherAppendable) Appender(ctx context.Context) storage.Appender { // RulesLimits defines limits used by Ruler. type RulesLimits interface { MaxQueryLength(userID string) time.Duration - RulerTenantShardSize(userID string) int + RulerTenantShardSize(userID string) float64 RulerMaxRuleGroupsPerTenant(userID string) int RulerMaxRulesPerRuleGroup(userID string) int RulerQueryOffset(userID string) time.Duration diff --git a/pkg/ruler/ruler.go b/pkg/ruler/ruler.go index 067c7a4f591..22d475fe720 100644 --- a/pkg/ruler/ruler.go +++ b/pkg/ruler/ruler.go @@ -857,7 +857,7 @@ func (r *Ruler) listRulesShuffleSharding(ctx context.Context) (map[string]rulesp userRings := map[string]ring.ReadRing{} for _, u := range users { if shardSize := r.limits.RulerTenantShardSize(u); shardSize > 0 { - subRing := r.ring.ShuffleShard(u, shardSize) + subRing := r.ring.ShuffleShard(u, r.getShardSizeForUser(u)) // Include the user only if it belongs to this ruler shard. if subRing.HasInstance(r.lifecycler.GetInstanceID()) { @@ -1257,6 +1257,7 @@ func (r *Ruler) ruleGroupListToGroupStateDesc(userID string, backupGroups rulesp User: userID, Limit: group.Limit, QueryOffset: group.QueryOffset, + Labels: group.Labels, }, // We are keeping default value for EvaluationTimestamp and EvaluationDuration since the backup is not evaluating } @@ -1325,11 +1326,18 @@ func (r *Ruler) ruleGroupListToGroupStateDesc(userID string, backupGroups rulesp return groupDescs, nil } +func (r *Ruler) getShardSizeForUser(userID string) int { + newShardSize := util.DynamicShardSize(r.limits.RulerTenantShardSize(userID), r.ring.InstancesCount()) + + // We want to guarantee that shard size will be at least replication factor + return max(newShardSize, r.cfg.Ring.ReplicationFactor) +} + func (r *Ruler) getShardedRules(ctx context.Context, userID string, rulesRequest RulesRequest) (*RulesResponse, error) { ring := ring.ReadRing(r.ring) if shardSize := r.limits.RulerTenantShardSize(userID); shardSize > 0 && r.cfg.ShardingStrategy == util.ShardingStrategyShuffle { - ring = r.ring.ShuffleShard(userID, shardSize) + ring = r.ring.ShuffleShard(userID, r.getShardSizeForUser(userID)) } rulers, failedZones, err := GetReplicationSetForListRule(ring, &r.cfg.Ring) diff --git a/pkg/ruler/ruler_test.go b/pkg/ruler/ruler_test.go index 7fd7236e8aa..ec7eb287c30 100644 --- a/pkg/ruler/ruler_test.go +++ b/pkg/ruler/ruler_test.go @@ -87,7 +87,7 @@ func defaultRulerConfig(t testing.TB) Config { type ruleLimits struct { mtx sync.RWMutex - tenantShard int + tenantShard float64 maxRulesPerRuleGroup int maxRuleGroups int disabledRuleGroups validation.DisabledRuleGroups @@ -102,7 +102,7 @@ func (r *ruleLimits) setRulerExternalLabels(lset labels.Labels) { r.mtx.Unlock() } -func (r *ruleLimits) RulerTenantShardSize(_ string) int { +func (r *ruleLimits) RulerTenantShardSize(_ string) float64 { r.mtx.RLock() defer r.mtx.RUnlock() return r.tenantShard @@ -630,7 +630,7 @@ func TestGetRules(t *testing.T) { type testCase struct { sharding bool 
shardingStrategy string - shuffleShardSize int + shuffleShardSize float64 rulesRequest RulesRequest expectedCount map[string]int expectedClientCallCount int @@ -1887,7 +1887,7 @@ func TestSharding(t *testing.T) { sharding bool shardingStrategy string replicationFactor int - shuffleShardSize int + shuffleShardSize float64 setupRing func(*ring.Desc) enabledUsers []string disabledUsers []string @@ -3104,3 +3104,150 @@ func TestRuler_QueryOffset(t *testing.T) { gotOffset = rg.GetGroup().QueryOffset require.Equal(t, time.Minute*2, *gotOffset) } + +func TestGetShardSizeForUser(t *testing.T) { + tests := []struct { + name string + userID string + replicationFactor int + rulerInstanceCount int + tenantShardSize float64 + expectedShardSize int + }{ + { + name: "User with fixed shard size with 10 ruler instances", + userID: "user1", + rulerInstanceCount: 10, + replicationFactor: 1, + tenantShardSize: 2, + expectedShardSize: 2, + }, + { + name: "User with fixed shard size with 50 ruler instances", + userID: "user1", + rulerInstanceCount: 50, + replicationFactor: 1, + tenantShardSize: 30, + expectedShardSize: 30, + }, + { + name: "User with percentage shard size with 10 ruler instances", + userID: "user1", + rulerInstanceCount: 10, + replicationFactor: 1, + tenantShardSize: 0.6, + expectedShardSize: 6, + }, + { + name: "User with percentage shard size with 80 ruler instances", + userID: "user1", + rulerInstanceCount: 80, + replicationFactor: 1, + tenantShardSize: 0.25, + expectedShardSize: 20, + }, + { + name: "Ensure shard size is at least replication factor", + userID: "user1", + rulerInstanceCount: 10, + replicationFactor: 3, + tenantShardSize: 0.1, + expectedShardSize: 3, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + + rulerStateMap := make(map[string]ring.InstanceState) + rulerAZEvenSpread := make(map[string]string) + rulerIDs := make([]string, tc.rulerInstanceCount) + + for i := 0; i < tc.rulerInstanceCount; i++ { + rulerID := fmt.Sprintf("ruler%d", i+1) + rulerIDs[i] = rulerID + rulerStateMap[rulerID] = ring.ACTIVE + rulerAZEvenSpread[rulerID] = string(rune('a' + i%3)) + } + + kvStore, cleanUp := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) + t.Cleanup(func() { assert.NoError(t, cleanUp.Close()) }) + allRulesByUser := map[string]rulespb.RuleGroupList{} + allTokensByRuler := map[string][]uint32{} + rulerAddrMap := map[string]*Ruler{} + + createRuler := func(id string) *Ruler { + store := newMockRuleStore(allRulesByUser, nil) + cfg := defaultRulerConfig(t) + + cfg.ShardingStrategy = util.ShardingStrategyShuffle + cfg.EnableSharding = true + cfg.EnableHAEvaluation = false + cfg.EvaluationInterval = 5 * time.Minute + + cfg.Ring = RingConfig{ + InstanceID: id, + InstanceAddr: id, + KVStore: kv.Config{ + Mock: kvStore, + }, + ReplicationFactor: tc.replicationFactor, + ZoneAwarenessEnabled: true, + InstanceZone: rulerAZEvenSpread[id], + } + + r, _ := buildRuler(t, cfg, nil, store, rulerAddrMap) + r.limits = &ruleLimits{tenantShard: tc.tenantShardSize} + rulerAddrMap[id] = r + if r.ring != nil { + require.NoError(t, services.StartAndAwaitRunning(context.Background(), r.ring)) + t.Cleanup(r.ring.StopAsync) + } + return r + } + + var testRuler *Ruler + // Create rulers and ensure they join the ring + for _, rID := range rulerIDs { + r := createRuler(rID) + testRuler = r + require.NoError(t, services.StartAndAwaitRunning(context.Background(), r.lifecycler)) + } + + err := kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out 
interface{}, retry bool, err error) { + d, _ := in.(*ring.Desc) + if d == nil { + d = ring.NewDesc() + } + for rID, tokens := range allTokensByRuler { + d.AddIngester(rID, rulerAddrMap[rID].lifecycler.GetInstanceAddr(), rulerAddrMap[rID].lifecycler.GetInstanceZone(), tokens, ring.ACTIVE, time.Now()) + } + return d, true, nil + }) + require.NoError(t, err) + // Wait a bit to make sure ruler's ring is updated. + time.Sleep(100 * time.Millisecond) + + // Check the ring state + ringDesc, err := kvStore.Get(context.Background(), ringKey) + require.NoError(t, err) + require.NotNil(t, ringDesc) + desc := ringDesc.(*ring.Desc) + require.Equal(t, tc.rulerInstanceCount, len(desc.Ingesters)) + + forEachRuler := func(f func(rID string, r *Ruler)) { + for rID, r := range rulerAddrMap { + f(rID, r) + } + } + + // Sync Rules + forEachRuler(func(_ string, r *Ruler) { + r.syncRules(context.Background(), rulerSyncReasonInitial) + }) + + result := testRuler.getShardSizeForUser(tc.userID) + assert.Equal(t, tc.expectedShardSize, result) + }) + } +} diff --git a/pkg/ruler/rulespb/compat.go b/pkg/ruler/rulespb/compat.go index 7526062ad4f..dd674129ae2 100644 --- a/pkg/ruler/rulespb/compat.go +++ b/pkg/ruler/rulespb/compat.go @@ -26,6 +26,7 @@ func ToProto(user string, namespace string, rl rulefmt.RuleGroup) *RuleGroupDesc User: user, Limit: int64(rl.Limit), QueryOffset: queryOffset, + Labels: cortexpb.FromLabelsToLabelAdapters(labels.FromMap(rl.Labels)), } return &rg } @@ -60,6 +61,7 @@ func FromProto(rg *RuleGroupDesc) rulefmt.RuleGroup { Rules: make([]rulefmt.RuleNode, len(rg.GetRules())), Limit: int(rg.Limit), QueryOffset: queryOffset, + Labels: cortexpb.FromLabelAdaptersToLabels(rg.Labels).Map(), } for i, rl := range rg.GetRules() { diff --git a/pkg/ruler/rulespb/compat_test.go b/pkg/ruler/rulespb/compat_test.go index 736366714dc..31b80f2df6d 100644 --- a/pkg/ruler/rulespb/compat_test.go +++ b/pkg/ruler/rulespb/compat_test.go @@ -35,6 +35,7 @@ func TestProto(t *testing.T) { Rules: rules, Interval: model.Duration(time.Minute), QueryOffset: &queryOffset, + Labels: map[string]string{}, } desc := ToProto("test", "namespace", rg) diff --git a/pkg/ruler/rulespb/rules.pb.go b/pkg/ruler/rulespb/rules.pb.go index 8f09b2cb40b..c8096fb4510 100644 --- a/pkg/ruler/rulespb/rules.pb.go +++ b/pkg/ruler/rulespb/rules.pb.go @@ -43,9 +43,10 @@ type RuleGroupDesc struct { // having to repeatedly redefine the proto description. It can also be leveraged // to create custom `ManagerOpts` based on rule configs which can then be passed // to the Prometheus Manager. 
- Options []*types.Any `protobuf:"bytes,9,rep,name=options,proto3" json:"options,omitempty"` - Limit int64 `protobuf:"varint,10,opt,name=limit,proto3" json:"limit,omitempty"` - QueryOffset *time.Duration `protobuf:"bytes,11,opt,name=queryOffset,proto3,stdduration" json:"queryOffset,omitempty"` + Options []*types.Any `protobuf:"bytes,9,rep,name=options,proto3" json:"options,omitempty"` + Limit int64 `protobuf:"varint,10,opt,name=limit,proto3" json:"limit,omitempty"` + QueryOffset *time.Duration `protobuf:"bytes,11,opt,name=queryOffset,proto3,stdduration" json:"queryOffset,omitempty"` + Labels []github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter `protobuf:"bytes,12,rep,name=labels,proto3,customtype=github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter" json:"labels"` } func (m *RuleGroupDesc) Reset() { *m = RuleGroupDesc{} } @@ -222,42 +223,42 @@ func init() { func init() { proto.RegisterFile("rules.proto", fileDescriptor_8e722d3e922f0937) } var fileDescriptor_8e722d3e922f0937 = []byte{ - // 548 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x52, 0x41, 0x8b, 0xd3, 0x4c, - 0x18, 0xce, 0x6c, 0xd3, 0x6c, 0x3a, 0xa1, 0xec, 0x32, 0x5f, 0xf9, 0xc8, 0xae, 0x32, 0x2d, 0x0b, - 0x42, 0x4f, 0x29, 0xac, 0x78, 0xf0, 0x20, 0xd2, 0xb2, 0xae, 0x50, 0x04, 0x25, 0x47, 0x11, 0x96, - 0x49, 0x3a, 0x89, 0x71, 0xd3, 0x4c, 0x9c, 0x4c, 0x64, 0x7b, 0xf3, 0x27, 0x78, 0xf4, 0x27, 0xf8, - 0x53, 0xf6, 0x58, 0x6f, 0x8b, 0x48, 0xb5, 0xe9, 0x45, 0x3c, 0xed, 0x4f, 0x90, 0x99, 0x49, 0x74, - 0xd5, 0x83, 0xeb, 0xc1, 0x53, 0xde, 0x67, 0x9e, 0x3c, 0xf3, 0x3e, 0xef, 0x33, 0x2f, 0x74, 0x78, - 0x99, 0xd2, 0xc2, 0xcb, 0x39, 0x13, 0x0c, 0xb5, 0x15, 0xd8, 0xef, 0xc5, 0x2c, 0x66, 0xea, 0x64, - 0x24, 0x2b, 0x4d, 0xee, 0xe3, 0x98, 0xb1, 0x38, 0xa5, 0x23, 0x85, 0x82, 0x32, 0x1a, 0xcd, 0x4a, - 0x4e, 0x44, 0xc2, 0xb2, 0x9a, 0xdf, 0xfb, 0x95, 0x27, 0xd9, 0xa2, 0xa6, 0xee, 0xc6, 0x89, 0x78, - 0x5e, 0x06, 0x5e, 0xc8, 0xe6, 0xa3, 0x90, 0x71, 0x41, 0xcf, 0x72, 0xce, 0x5e, 0xd0, 0x50, 0xd4, - 0x68, 0x94, 0x9f, 0xc6, 0x0d, 0x11, 0xd4, 0x85, 0x96, 0x1e, 0x7c, 0xdc, 0x82, 0x5d, 0xbf, 0x4c, - 0xe9, 0x43, 0xce, 0xca, 0xfc, 0x88, 0x16, 0x21, 0x42, 0xd0, 0xcc, 0xc8, 0x9c, 0xba, 0x60, 0x00, - 0x86, 0x1d, 0x5f, 0xd5, 0xe8, 0x26, 0xec, 0xc8, 0x6f, 0x91, 0x93, 0x90, 0xba, 0x5b, 0x8a, 0xf8, - 0x71, 0x80, 0xee, 0x43, 0x3b, 0xc9, 0x04, 0xe5, 0xaf, 0x48, 0xea, 0xb6, 0x06, 0x60, 0xe8, 0x1c, - 0xee, 0x79, 0xda, 0xac, 0xd7, 0x98, 0xf5, 0x8e, 0xea, 0x61, 0x26, 0xf6, 0xf9, 0xaa, 0x6f, 0xbc, - 0xfd, 0xd4, 0x07, 0xfe, 0x77, 0x11, 0xba, 0x05, 0x75, 0x32, 0xae, 0x39, 0x68, 0x0d, 0x9d, 0xc3, - 0x1d, 0x4f, 0x87, 0x26, 0x7d, 0x49, 0x4b, 0xbe, 0x66, 0xa5, 0xb3, 0xb2, 0xa0, 0xdc, 0xb5, 0xb4, - 0x33, 0x59, 0x23, 0x0f, 0x6e, 0xb3, 0x5c, 0x5e, 0x5c, 0xb8, 0x1d, 0x25, 0xee, 0xfd, 0xd6, 0x7a, - 0x9c, 0x2d, 0xfc, 0xe6, 0x27, 0xd4, 0x83, 0xed, 0x34, 0x99, 0x27, 0xc2, 0x85, 0x03, 0x30, 0x6c, - 0xf9, 0x1a, 0xa0, 0x07, 0xd0, 0x79, 0x59, 0x52, 0xbe, 0x78, 0x1c, 0x45, 0x05, 0x15, 0xae, 0x73, - 0x9d, 0x21, 0x80, 0x1a, 0xe2, 0xaa, 0x6e, 0x6a, 0xda, 0xed, 0x5d, 0x6b, 0x6a, 0xda, 0xdb, 0xbb, - 0xf6, 0xd4, 0xb4, 0xed, 0xdd, 0xce, 0xc1, 0xfb, 0x16, 0xb4, 0x9b, 0x31, 0xa4, 0x7f, 0xf9, 0x32, - 0x4d, 0xb2, 0xb2, 0x46, 0xff, 0x43, 0x8b, 0xd3, 0x90, 0xf1, 0x59, 0x1d, 0x6b, 0x8d, 0xa4, 0x4f, - 0x92, 0x52, 0x2e, 0x54, 0xa0, 0x1d, 0x5f, 0x03, 0x74, 0x07, 0xb6, 0x22, 0xc6, 0x5d, 0xf3, 0xfa, - 0x21, 0xcb, 0xff, 0x51, 0x06, 0xad, 0x94, 0x04, 0x34, 0x2d, 0xdc, 0xb6, 0xca, 0xe8, 0x3f, 0xaf, - 0x59, 0x06, 0xef, 0x91, 0x3c, 0x7f, 0x42, 
0x12, 0x3e, 0x19, 0x4b, 0xcd, 0x87, 0x55, 0xff, 0xaf, - 0x96, 0x49, 0xeb, 0xc7, 0x33, 0x92, 0x0b, 0xca, 0xfd, 0xba, 0x0b, 0x3a, 0x83, 0x0e, 0xc9, 0x32, - 0x26, 0x88, 0x7e, 0x18, 0xeb, 0x9f, 0x36, 0xbd, 0xda, 0x0a, 0x3d, 0x83, 0xdd, 0x53, 0x4a, 0xf3, - 0xe3, 0x84, 0x27, 0x59, 0x7c, 0xcc, 0xb8, 0xdb, 0xfd, 0x53, 0x54, 0x37, 0xa4, 0x83, 0xaf, 0xab, - 0xfe, 0x8e, 0xd4, 0x9d, 0x44, 0x4a, 0x78, 0x12, 0x31, 0xae, 0xd2, 0xfb, 0xf9, 0x32, 0xf5, 0xb2, - 0xdd, 0xc9, 0xbd, 0xe5, 0x1a, 0x1b, 0x17, 0x6b, 0x6c, 0x5c, 0xae, 0x31, 0x78, 0x5d, 0x61, 0xf0, - 0xae, 0xc2, 0xe0, 0xbc, 0xc2, 0x60, 0x59, 0x61, 0xf0, 0xb9, 0xc2, 0xe0, 0x4b, 0x85, 0x8d, 0xcb, - 0x0a, 0x83, 0x37, 0x1b, 0x6c, 0x2c, 0x37, 0xd8, 0xb8, 0xd8, 0x60, 0xe3, 0xe9, 0xb6, 0xda, 0xe1, - 0x3c, 0x08, 0x2c, 0xe5, 0xe1, 0xf6, 0xb7, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9a, 0x1c, 0xe8, 0x17, - 0x1a, 0x04, 0x00, 0x00, + // 554 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x53, 0x31, 0x6f, 0xd3, 0x40, + 0x18, 0xf5, 0x61, 0xc7, 0x75, 0xce, 0x44, 0xad, 0x8e, 0x08, 0xb9, 0x05, 0x5d, 0xa2, 0x4a, 0x48, + 0x99, 0x1c, 0xa9, 0x88, 0x81, 0x01, 0xa1, 0x44, 0xa5, 0x48, 0x11, 0x12, 0xc8, 0x23, 0x42, 0xaa, + 0x6c, 0xe7, 0x6c, 0x4c, 0x1d, 0x9f, 0x39, 0x9f, 0x51, 0xb3, 0xf1, 0x13, 0x18, 0xf9, 0x09, 0xfc, + 0x94, 0x0e, 0x0c, 0x61, 0xab, 0x18, 0x02, 0x71, 0x16, 0xc4, 0xd4, 0x9f, 0x80, 0xee, 0xce, 0x86, + 0x00, 0x03, 0x65, 0x80, 0x29, 0xdf, 0x77, 0xef, 0xde, 0x7d, 0xef, 0x7b, 0x2f, 0x86, 0x36, 0x2b, + 0x53, 0x52, 0xb8, 0x39, 0xa3, 0x9c, 0xa2, 0x96, 0x6c, 0xf6, 0xba, 0x31, 0x8d, 0xa9, 0x3c, 0x19, + 0x8a, 0x4a, 0x81, 0x7b, 0x38, 0xa6, 0x34, 0x4e, 0xc9, 0x50, 0x76, 0x41, 0x19, 0x0d, 0xa7, 0x25, + 0xf3, 0x79, 0x42, 0xb3, 0x1a, 0xdf, 0xfd, 0x15, 0xf7, 0xb3, 0x79, 0x0d, 0xdd, 0x8d, 0x13, 0xfe, + 0xbc, 0x0c, 0xdc, 0x90, 0xce, 0x86, 0x21, 0x65, 0x9c, 0x9c, 0xe6, 0x8c, 0xbe, 0x20, 0x21, 0xaf, + 0xbb, 0x61, 0x7e, 0x12, 0x37, 0x40, 0x50, 0x17, 0x8a, 0xba, 0xff, 0x5e, 0x87, 0x1d, 0xaf, 0x4c, + 0xc9, 0x43, 0x46, 0xcb, 0xfc, 0x90, 0x14, 0x21, 0x42, 0xd0, 0xc8, 0xfc, 0x19, 0x71, 0x40, 0x1f, + 0x0c, 0xda, 0x9e, 0xac, 0xd1, 0x4d, 0xd8, 0x16, 0xbf, 0x45, 0xee, 0x87, 0xc4, 0xb9, 0x22, 0x81, + 0x1f, 0x07, 0xe8, 0x3e, 0xb4, 0x92, 0x8c, 0x13, 0xf6, 0xca, 0x4f, 0x1d, 0xbd, 0x0f, 0x06, 0xf6, + 0xc1, 0xae, 0xab, 0xc4, 0xba, 0x8d, 0x58, 0xf7, 0xb0, 0x5e, 0x66, 0x6c, 0x9d, 0x2d, 0x7b, 0xda, + 0xdb, 0x4f, 0x3d, 0xe0, 0x7d, 0x27, 0xa1, 0x5b, 0x50, 0x39, 0xe3, 0x18, 0x7d, 0x7d, 0x60, 0x1f, + 0x6c, 0xbb, 0xca, 0x34, 0xa1, 0x4b, 0x48, 0xf2, 0x14, 0x2a, 0x94, 0x95, 0x05, 0x61, 0x8e, 0xa9, + 0x94, 0x89, 0x1a, 0xb9, 0x70, 0x8b, 0xe6, 0xe2, 0xe1, 0xc2, 0x69, 0x4b, 0x72, 0xf7, 0xb7, 0xd1, + 0xa3, 0x6c, 0xee, 0x35, 0x97, 0x50, 0x17, 0xb6, 0xd2, 0x64, 0x96, 0x70, 0x07, 0xf6, 0xc1, 0x40, + 0xf7, 0x54, 0x83, 0x1e, 0x40, 0xfb, 0x65, 0x49, 0xd8, 0xfc, 0x71, 0x14, 0x15, 0x84, 0x3b, 0xf6, + 0x65, 0x96, 0x00, 0x72, 0x89, 0x4d, 0x1e, 0xca, 0xa0, 0x99, 0xfa, 0x01, 0x49, 0x0b, 0xe7, 0xaa, + 0xd4, 0x72, 0xcd, 0x6d, 0x4c, 0x77, 0x1f, 0x89, 0xf3, 0x27, 0x7e, 0xc2, 0xc6, 0x23, 0x61, 0xc0, + 0xc7, 0x65, 0xef, 0xaf, 0x42, 0x53, 0xfc, 0xd1, 0xd4, 0xcf, 0x39, 0x61, 0x5e, 0x3d, 0x65, 0x62, + 0x58, 0xad, 0x1d, 0x73, 0x62, 0x58, 0x5b, 0x3b, 0xd6, 0xc4, 0xb0, 0xac, 0x9d, 0xf6, 0xfe, 0x07, + 0x1d, 0x5a, 0x8d, 0x6d, 0xc2, 0x2f, 0xf1, 0x68, 0x93, 0xa4, 0xa8, 0xd1, 0x75, 0x68, 0x32, 0x12, + 0x52, 0x36, 0xad, 0x63, 0xac, 0x3b, 0xe1, 0x8b, 0x9f, 0x12, 0xc6, 0x65, 0x80, 0x6d, 0x4f, 0x35, + 0xe8, 0x0e, 0xd4, 0x23, 0xca, 0x1c, 0xe3, 0xf2, 0xa1, 0x8a, 0xfb, 0x1b, 0x3e, 0xb4, 0xfe, 
0x87, + 0x0f, 0xe8, 0x14, 0xda, 0x7e, 0x96, 0x51, 0xee, 0xab, 0x3f, 0x82, 0xf9, 0x4f, 0x87, 0x6e, 0x8e, + 0x42, 0xcf, 0x60, 0xe7, 0x84, 0x90, 0xfc, 0x28, 0x61, 0x49, 0x16, 0x1f, 0x51, 0xe6, 0x74, 0xfe, + 0x64, 0xd5, 0x0d, 0xa1, 0xe0, 0xeb, 0xb2, 0xb7, 0x2d, 0x78, 0xc7, 0x91, 0x24, 0x1e, 0x47, 0x94, + 0x49, 0xf7, 0x7e, 0x7e, 0x4c, 0x26, 0xdb, 0x19, 0xdf, 0x5b, 0xac, 0xb0, 0x76, 0xbe, 0xc2, 0xda, + 0xc5, 0x0a, 0x83, 0xd7, 0x15, 0x06, 0xef, 0x2a, 0x0c, 0xce, 0x2a, 0x0c, 0x16, 0x15, 0x06, 0x9f, + 0x2b, 0x0c, 0xbe, 0x54, 0x58, 0xbb, 0xa8, 0x30, 0x78, 0xb3, 0xc6, 0xda, 0x62, 0x8d, 0xb5, 0xf3, + 0x35, 0xd6, 0x9e, 0x6e, 0xc9, 0x6f, 0x26, 0x0f, 0x02, 0x53, 0x6a, 0xb8, 0xfd, 0x2d, 0x00, 0x00, + 0xff, 0xff, 0x39, 0x21, 0xc0, 0xef, 0x8a, 0x04, 0x00, 0x00, } func (this *RuleGroupDesc) Equal(that interface{}) bool { @@ -319,6 +320,14 @@ func (this *RuleGroupDesc) Equal(that interface{}) bool { } else if that1.QueryOffset != nil { return false } + if len(this.Labels) != len(that1.Labels) { + return false + } + for i := range this.Labels { + if !this.Labels[i].Equal(that1.Labels[i]) { + return false + } + } return true } func (this *RuleDesc) Equal(that interface{}) bool { @@ -377,7 +386,7 @@ func (this *RuleGroupDesc) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 12) + s := make([]string, 0, 13) s = append(s, "&rulespb.RuleGroupDesc{") s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") s = append(s, "Namespace: "+fmt.Sprintf("%#v", this.Namespace)+",\n") @@ -391,6 +400,7 @@ func (this *RuleGroupDesc) GoString() string { } s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") s = append(s, "QueryOffset: "+fmt.Sprintf("%#v", this.QueryOffset)+",\n") + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -438,6 +448,20 @@ func (m *RuleGroupDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRules(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } if m.QueryOffset != nil { n1, err1 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.QueryOffset, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.QueryOffset):]) if err1 != nil { @@ -651,6 +675,12 @@ func (m *RuleGroupDesc) Size() (n int) { l = github_com_gogo_protobuf_types.SizeOfStdDuration(*m.QueryOffset) n += 1 + l + sovRules(uint64(l)) } + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovRules(uint64(l)) + } + } return n } @@ -720,6 +750,7 @@ func (this *RuleGroupDesc) String() string { `Options:` + repeatedStringForOptions + `,`, `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, `QueryOffset:` + strings.Replace(fmt.Sprintf("%v", this.QueryOffset), "Duration", "duration.Duration", 1) + `,`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, `}`, }, "") return s @@ -1029,6 +1060,40 @@ func (m *RuleGroupDesc) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRules(dAtA[iNdEx:]) diff --git a/pkg/ruler/rulespb/rules.proto b/pkg/ruler/rulespb/rules.proto index ed4e98a76b2..f60a0a00fbe 100644 --- a/pkg/ruler/rulespb/rules.proto +++ b/pkg/ruler/rulespb/rules.proto @@ -30,6 +30,10 @@ message RuleGroupDesc { int64 limit =10; google.protobuf.Duration queryOffset = 11 [(gogoproto.nullable) = true, (gogoproto.stdduration) = true]; + repeated cortexpb.LabelPair labels = 12 [ + (gogoproto.nullable) = false, + (gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter" + ]; } // RuleDesc is a proto representation of a Prometheus Rule diff --git a/pkg/storegateway/gateway.go b/pkg/storegateway/gateway.go index c043adee181..835e95e8912 100644 --- a/pkg/storegateway/gateway.go +++ b/pkg/storegateway/gateway.go @@ -16,8 +16,10 @@ import ( "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/extprom" "github.com/thanos-io/thanos/pkg/store/storepb" + "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/logging" + "github.com/cortexproject/cortex/pkg/configs" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv" "github.com/cortexproject/cortex/pkg/storage/bucket" @@ -25,6 +27,8 @@ import ( "github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_limiter "github.com/cortexproject/cortex/pkg/util/limiter" + "github.com/cortexproject/cortex/pkg/util/resource" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -64,6 +68,8 @@ type Config struct { EnabledTenants flagext.StringSliceCSV `yaml:"enabled_tenants"` DisabledTenants flagext.StringSliceCSV `yaml:"disabled_tenants"` + InstanceLimits configs.InstanceLimits `yaml:"instance_limits"` + // Hedged Request HedgedRequest bucket.HedgedRequestConfig `yaml:"hedged_request"` } @@ -77,10 +83,11 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.Var(&cfg.EnabledTenants, "store-gateway.enabled-tenants", "Comma separated list of tenants whose store metrics this storegateway can process. If specified, only these tenants will be handled by storegateway, otherwise this storegateway will be enabled for all the tenants in the store-gateway cluster.") f.Var(&cfg.DisabledTenants, "store-gateway.disabled-tenants", "Comma separated list of tenants whose store metrics this storegateway cannot process. If specified, a storegateway that would normally pick the specified tenant(s) for processing will ignore them instead.") cfg.HedgedRequest.RegisterFlagsWithPrefix(f, "store-gateway.") + cfg.InstanceLimits.RegisterFlagsWithPrefix(f, "store-gateway.") } // Validate the Config. 
-func (cfg *Config) Validate(limits validation.Limits) error { +func (cfg *Config) Validate(limits validation.Limits, monitoredResources flagext.StringSliceCSV) error { if cfg.ShardingEnabled { if !util.StringsContain(supportedShardingStrategies, cfg.ShardingStrategy) { return errInvalidShardingStrategy @@ -95,6 +102,10 @@ func (cfg *Config) Validate(limits validation.Limits) error { return err } + if err := cfg.InstanceLimits.Validate(monitoredResources); err != nil { + return err + } + return nil } @@ -117,10 +128,12 @@ type StoreGateway struct { subservices *services.Manager subservicesWatcher *services.FailureWatcher + resourceBasedLimiter *util_limiter.ResourceBasedLimiter + bucketSync *prometheus.CounterVec } -func NewStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.BlocksStorageConfig, limits *validation.Overrides, logLevel logging.Level, logger log.Logger, reg prometheus.Registerer) (*StoreGateway, error) { +func NewStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.BlocksStorageConfig, limits *validation.Overrides, logLevel logging.Level, logger log.Logger, reg prometheus.Registerer, resourceMonitor *resource.Monitor) (*StoreGateway, error) { var ringStore kv.Client bucketClient, err := createBucketClient(storageCfg, gatewayCfg.HedgedRequest.GetHedgedRoundTripper(), logger, reg) @@ -140,10 +153,10 @@ func NewStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.BlocksStorageConf } } - return newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, limits, logLevel, logger, reg) + return newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, limits, logLevel, logger, reg, resourceMonitor) } -func newStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.BlocksStorageConfig, bucketClient objstore.InstrumentedBucket, ringStore kv.Client, limits *validation.Overrides, logLevel logging.Level, logger log.Logger, reg prometheus.Registerer) (*StoreGateway, error) { +func newStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.BlocksStorageConfig, bucketClient objstore.InstrumentedBucket, ringStore kv.Client, limits *validation.Overrides, logLevel logging.Level, logger log.Logger, reg prometheus.Registerer, resourceMonitor *resource.Monitor) (*StoreGateway, error) { var err error g := &StoreGateway{ @@ -229,6 +242,20 @@ func newStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.BlocksStorageConf return nil, errors.Wrap(err, "create bucket stores") } + if resourceMonitor != nil { + resourceLimits := make(map[resource.Type]float64) + if gatewayCfg.InstanceLimits.CPUUtilization > 0 { + resourceLimits[resource.CPU] = gatewayCfg.InstanceLimits.CPUUtilization + } + if gatewayCfg.InstanceLimits.HeapUtilization > 0 { + resourceLimits[resource.Heap] = gatewayCfg.InstanceLimits.HeapUtilization + } + g.resourceBasedLimiter, err = util_limiter.NewResourceBasedLimiter(resourceMonitor, resourceLimits, reg, "store-gateway") + if err != nil { + return nil, errors.Wrap(err, "error creating resource based limiter") + } + } + g.Service = services.NewBasicService(g.starting, g.running, g.stopping) return g, nil @@ -381,19 +408,41 @@ func (g *StoreGateway) syncStores(ctx context.Context, reason string) { } func (g *StoreGateway) Series(req *storepb.SeriesRequest, srv storegatewaypb.StoreGateway_SeriesServer) error { + if err := g.checkResourceUtilization(); err != nil { + return err + } return g.stores.Series(req, srv) } // LabelNames implements the Storegateway proto service. 
func (g *StoreGateway) LabelNames(ctx context.Context, req *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) { + if err := g.checkResourceUtilization(); err != nil { + return nil, err + } return g.stores.LabelNames(ctx, req) } // LabelValues implements the Storegateway proto service. func (g *StoreGateway) LabelValues(ctx context.Context, req *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) { + if err := g.checkResourceUtilization(); err != nil { + return nil, err + } return g.stores.LabelValues(ctx, req) } +func (g *StoreGateway) checkResourceUtilization() error { + if g.resourceBasedLimiter == nil { + return nil + } + + if err := g.resourceBasedLimiter.AcceptNewRequest(); err != nil { + level.Warn(g.logger).Log("msg", "failed to accept request", "err", err) + return httpgrpc.Errorf(http.StatusTooManyRequests, "failed to query: %s", util_limiter.ErrResourceLimitReachedStr) + } + + return nil +} + func (g *StoreGateway) OnRingInstanceRegister(lc *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.InstanceDesc) (ring.InstanceState, ring.Tokens) { // When we initialize the store-gateway instance in the ring we want to start from // a clean situation, so whatever is the state we set it JOINING, while we keep existing diff --git a/pkg/storegateway/gateway_test.go b/pkg/storegateway/gateway_test.go index d35f3677b4b..ca35034bd43 100644 --- a/pkg/storegateway/gateway_test.go +++ b/pkg/storegateway/gateway_test.go @@ -41,6 +41,8 @@ import ( cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_limiter "github.com/cortexproject/cortex/pkg/util/limiter" + "github.com/cortexproject/cortex/pkg/util/resource" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/test" "github.com/cortexproject/cortex/pkg/util/validation" @@ -89,7 +91,7 @@ func TestConfig_Validate(t *testing.T) { flagext.DefaultValues(cfg, limits) testData.setup(cfg, limits) - assert.Equal(t, testData.expected, cfg.Validate(*limits)) + assert.Equal(t, testData.expected, cfg.Validate(*limits, nil)) }) } } @@ -150,7 +152,7 @@ func TestStoreGateway_InitialSyncWithDefaultShardingEnabled(t *testing.T) { })) } - g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, defaultLimitsOverrides(t), mockLoggingLevel(), log.NewNopLogger(), nil) + g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, defaultLimitsOverrides(t), mockLoggingLevel(), log.NewNopLogger(), nil, nil) require.NoError(t, err) defer services.StopAndAwaitTerminated(ctx, g) //nolint:errcheck assert.False(t, g.ringLifecycler.IsRegistered()) @@ -192,7 +194,7 @@ func TestStoreGateway_InitialSyncWithShardingDisabled(t *testing.T) { storageCfg := mockStorageConfig(t) bucketClient := &bucket.ClientMock{} - g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, nil, defaultLimitsOverrides(t), mockLoggingLevel(), log.NewNopLogger(), nil) + g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, nil, defaultLimitsOverrides(t), mockLoggingLevel(), log.NewNopLogger(), nil, nil) require.NoError(t, err) defer services.StopAndAwaitTerminated(ctx, g) //nolint:errcheck @@ -219,7 +221,7 @@ func TestStoreGateway_InitialSyncFailure(t *testing.T) { bucketClient := &bucket.ClientMock{} - g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, defaultLimitsOverrides(t), 
mockLoggingLevel(), log.NewNopLogger(), nil) + g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, defaultLimitsOverrides(t), mockLoggingLevel(), log.NewNopLogger(), nil, nil) require.NoError(t, err) bucketClient.MockIter("", []string{}, errors.New("network error")) @@ -358,7 +360,7 @@ func TestStoreGateway_InitialSyncWithWaitRingStability(t *testing.T) { require.NoError(t, err) reg := prometheus.NewPedanticRegistry() - g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, overrides, mockLoggingLevel(), log.NewNopLogger(), reg) + g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, overrides, mockLoggingLevel(), log.NewNopLogger(), reg, nil) require.NoError(t, err) defer services.StopAndAwaitTerminated(ctx, g) //nolint:errcheck @@ -460,7 +462,7 @@ func TestStoreGateway_BlocksSyncWithDefaultSharding_RingTopologyChangedAfterScal require.NoError(t, err) reg := prometheus.NewPedanticRegistry() - g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, overrides, mockLoggingLevel(), log.NewNopLogger(), reg) + g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, overrides, mockLoggingLevel(), log.NewNopLogger(), reg, nil) require.NoError(t, err) return g, instanceID, reg @@ -604,7 +606,7 @@ func TestStoreGateway_ShouldSupportLoadRingTokensFromFile(t *testing.T) { bucketClient := &bucket.ClientMock{} bucketClient.MockIter("", []string{}, nil) - g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, defaultLimitsOverrides(t), mockLoggingLevel(), log.NewNopLogger(), nil) + g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, defaultLimitsOverrides(t), mockLoggingLevel(), log.NewNopLogger(), nil, nil) require.NoError(t, err) defer services.StopAndAwaitTerminated(ctx, g) //nolint:errcheck assert.False(t, g.ringLifecycler.IsRegistered()) @@ -814,7 +816,7 @@ func TestStoreGateway_SyncOnRingTopologyChanged(t *testing.T) { bucketClient := &bucket.ClientMock{} bucketClient.MockIter("", []string{}, nil) - g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, defaultLimitsOverrides(t), mockLoggingLevel(), log.NewNopLogger(), reg) + g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, defaultLimitsOverrides(t), mockLoggingLevel(), log.NewNopLogger(), reg, nil) require.NoError(t, err) // Store the initial ring state before starting the gateway. 
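(Editor's note: the test updates above simply thread `nil` through the new `resourceMonitor` parameter. For reviewers, here is a rough sketch of how a caller could wire a real monitor into the store-gateway, inferred from the constructors in this diff; the function name, context handling, and limit values are illustrative assumptions, not part of this patch.)

```go
// Editor's sketch, not part of this patch: wiring the new resource.Monitor
// into NewStoreGateway. Limit values are illustrative assumptions; per this
// diff, the CPU limit is a core count and the heap limit is a byte count.
package main // hypothetical wiring code

import (
	"context"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/weaveworks/common/logging"

	cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb"
	"github.com/cortexproject/cortex/pkg/storegateway"
	"github.com/cortexproject/cortex/pkg/util/resource"
	"github.com/cortexproject/cortex/pkg/util/services"
	"github.com/cortexproject/cortex/pkg/util/validation"
)

func newGatewayWithResourceMonitor(
	ctx context.Context,
	gatewayCfg storegateway.Config,
	storageCfg cortex_tsdb.BlocksStorageConfig,
	overrides *validation.Overrides,
	logLevel logging.Level,
	logger log.Logger,
	reg prometheus.Registerer,
) (*storegateway.StoreGateway, error) {
	monitor, err := resource.NewMonitor(map[resource.Type]float64{
		resource.CPU:  2,       // assumed container CPU limit, in cores
		resource.Heap: 8 << 30, // assumed container heap limit, in bytes
	}, reg)
	if err != nil {
		return nil, err
	}
	// The monitor is a services.Service and only produces utilization
	// numbers once it is running.
	if err := services.StartAndAwaitRunning(ctx, monitor); err != nil {
		return nil, err
	}
	return storegateway.NewStoreGateway(gatewayCfg, storageCfg, overrides, logLevel, logger, reg, monitor)
}
```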
@@ -876,7 +878,7 @@ func TestStoreGateway_RingLifecyclerShouldAutoForgetUnhealthyInstances(t *testin bucketClient := &bucket.ClientMock{} bucketClient.MockIter("", []string{}, nil) - g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, defaultLimitsOverrides(t), mockLoggingLevel(), log.NewNopLogger(), nil) + g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, defaultLimitsOverrides(t), mockLoggingLevel(), log.NewNopLogger(), nil, nil) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(ctx, g)) defer services.StopAndAwaitTerminated(ctx, g) //nolint:errcheck @@ -960,7 +962,7 @@ func TestStoreGateway_SeriesQueryingShouldRemoveExternalLabels(t *testing.T) { storageCfg := mockStorageConfig(t) storageCfg.BucketStore.BucketIndex.Enabled = bucketIndexEnabled - g, err := newStoreGateway(gatewayCfg, storageCfg, objstore.WithNoopInstr(bucketClient), nil, defaultLimitsOverrides(t), mockLoggingLevel(), logger, nil) + g, err := newStoreGateway(gatewayCfg, storageCfg, objstore.WithNoopInstr(bucketClient), nil, defaultLimitsOverrides(t), mockLoggingLevel(), logger, nil, nil) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(ctx, g)) defer services.StopAndAwaitTerminated(ctx, g) //nolint:errcheck @@ -1059,7 +1061,7 @@ func TestStoreGateway_SeriesQueryingShouldEnforceMaxChunksPerQueryLimit(t *testi gatewayCfg.ShardingEnabled = false storageCfg := mockStorageConfig(t) - g, err := newStoreGateway(gatewayCfg, storageCfg, objstore.WithNoopInstr(bucketClient), nil, overrides, mockLoggingLevel(), logger, nil) + g, err := newStoreGateway(gatewayCfg, storageCfg, objstore.WithNoopInstr(bucketClient), nil, overrides, mockLoggingLevel(), logger, nil, nil) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(ctx, g)) defer services.StopAndAwaitTerminated(ctx, g) //nolint:errcheck @@ -1148,7 +1150,7 @@ func TestStoreGateway_SeriesQueryingShouldEnforceMaxSeriesPerQueryLimit(t *testi gatewayCfg.ShardingEnabled = false storageCfg := mockStorageConfig(t) - g, err := newStoreGateway(gatewayCfg, storageCfg, objstore.WithNoopInstr(bucketClient), nil, overrides, mockLoggingLevel(), logger, nil) + g, err := newStoreGateway(gatewayCfg, storageCfg, objstore.WithNoopInstr(bucketClient), nil, overrides, mockLoggingLevel(), logger, nil, nil) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(ctx, g)) defer services.StopAndAwaitTerminated(ctx, g) //nolint:errcheck @@ -1176,6 +1178,58 @@ func TestStoreGateway_SeriesQueryingShouldEnforceMaxSeriesPerQueryLimit(t *testi } } +func TestStoreGateway_SeriesThrottledByResourceMonitor(t *testing.T) { + ctx := context.Background() + logger := log.NewNopLogger() + userID := "user-1" + + storageDir, err := os.MkdirTemp(os.TempDir(), "") + require.NoError(t, err) + defer os.RemoveAll(storageDir) //nolint:errcheck + + now := time.Now() + minT := now.Add(-1*time.Hour).Unix() * 1000 + maxT := now.Unix() * 1000 + mockTSDB(t, path.Join(storageDir, userID), 1, 0, minT, maxT) + + bucketClient, err := filesystem.NewBucketClient(filesystem.Config{Directory: storageDir}) + require.NoError(t, err) + + req := &storepb.SeriesRequest{ + MinTime: minT, + MaxTime: maxT, + Matchers: []storepb.LabelMatcher{ + {Type: storepb.LabelMatcher_RE, Name: "__name__", Value: ".*"}, + }, + } + + overrides, err := validation.NewOverrides(defaultLimitsConfig(), nil) + require.NoError(t, err) + + // Create a store-gateway used to query back the series from the blocks. 
+	gatewayCfg := mockGatewayConfig()
+	gatewayCfg.ShardingEnabled = false
+	storageCfg := mockStorageConfig(t)
+
+	g, err := newStoreGateway(gatewayCfg, storageCfg, objstore.WithNoopInstr(bucketClient), nil, overrides, mockLoggingLevel(), logger, nil, nil)
+	require.NoError(t, err)
+	require.NoError(t, services.StartAndAwaitRunning(ctx, g))
+	defer services.StopAndAwaitTerminated(ctx, g) //nolint:errcheck
+
+	limits := map[resource.Type]float64{
+		resource.CPU:  0.5,
+		resource.Heap: 0.5,
+	}
+	g.resourceBasedLimiter, err = util_limiter.NewResourceBasedLimiter(&mockResourceMonitor{cpu: 0.4, heap: 0.6}, limits, nil, "store-gateway")
+	require.NoError(t, err)
+
+	srv := newBucketStoreSeriesServer(setUserIDToGRPCContext(ctx, userID))
+	err = g.Series(req, srv)
+	require.Error(t, err)
+	exhaustedErr := util_limiter.ResourceLimitReachedError{}
+	require.ErrorContains(t, err, exhaustedErr.Error())
+}
+
 func mockGatewayConfig() Config {
 	cfg := Config{}
 	flagext.DefaultValues(&cfg)
@@ -1188,6 +1242,19 @@ func mockGatewayConfig() Config {
 	return cfg
 }
 
+type mockResourceMonitor struct {
+	cpu  float64
+	heap float64
+}
+
+func (m *mockResourceMonitor) GetCPUUtilization() float64 {
+	return m.cpu
+}
+
+func (m *mockResourceMonitor) GetHeapUtilization() float64 {
+	return m.heap
+}
+
 func mockStorageConfig(t *testing.T) cortex_tsdb.BlocksStorageConfig {
 	cfg := cortex_tsdb.BlocksStorageConfig{}
 	flagext.DefaultValues(&cfg)
diff --git a/pkg/util/limiter/resource_based_limiter.go b/pkg/util/limiter/resource_based_limiter.go
new file mode 100644
index 00000000000..40e4768cd0c
--- /dev/null
+++ b/pkg/util/limiter/resource_based_limiter.go
@@ -0,0 +1,72 @@
+package limiter
+
+import (
+	"fmt"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+
+	"github.com/cortexproject/cortex/pkg/util/resource"
+)
+
+const ErrResourceLimitReachedStr = "resource limit reached"
+
+type ResourceLimitReachedError struct{}
+
+func (e *ResourceLimitReachedError) Error() string {
+	return ErrResourceLimitReachedStr
+}
+
+type ResourceBasedLimiter struct {
+	resourceMonitor    resource.IMonitor
+	limits             map[resource.Type]float64
+	limitBreachedCount *prometheus.CounterVec
+}
+
+func NewResourceBasedLimiter(resourceMonitor resource.IMonitor, limits map[resource.Type]float64, registerer prometheus.Registerer, component string) (*ResourceBasedLimiter, error) {
+	for resType, limit := range limits {
+		switch resType {
+		case resource.CPU, resource.Heap:
+			// Include the resource type in the const labels; with only the
+			// component label, configuring both a CPU and a heap limit would
+			// attempt to register two collectors with the same descriptor
+			// and panic.
+			promauto.With(registerer).NewGauge(prometheus.GaugeOpts{
+				Name:        "cortex_resource_based_limiter_limit",
+				Help:        "Limit set for the resource utilization.",
+				ConstLabels: map[string]string{"component": component, "resource": string(resType)},
+			}).Set(limit)
+		default:
+			return nil, fmt.Errorf("unsupported resource type: [%s]", resType)
+		}
+	}
+
+	return &ResourceBasedLimiter{
+		resourceMonitor: resourceMonitor,
+		limits:          limits,
+		limitBreachedCount: promauto.With(registerer).NewCounterVec(
+			prometheus.CounterOpts{
+				Name:        "cortex_resource_based_limiter_throttled_total",
+				Help:        "The total number of times resource based limiter throttled.",
+				ConstLabels: map[string]string{"component": component},
+			},
+			[]string{"resource"},
+		),
+	}, nil
+}
+
+func (l *ResourceBasedLimiter) AcceptNewRequest() error {
+	for resType, limit := range l.limits {
+		var utilization float64
+
+		switch resType {
+		case resource.CPU:
+			utilization = l.resourceMonitor.GetCPUUtilization()
+		case resource.Heap:
+			utilization = l.resourceMonitor.GetHeapUtilization()
+		}
+
+		if utilization >= limit {
+			l.limitBreachedCount.WithLabelValues(string(resType)).Inc()
+			return fmt.Errorf("%s utilization limit reached (limit: %.3f, utilization: %.3f)", resType, limit, utilization)
+		}
+	}
+
+	return nil
+}
diff --git a/pkg/util/resource/monitor.go b/pkg/util/resource/monitor.go
new file mode 100644
index 00000000000..057fe4e1c87
--- /dev/null
+++ b/pkg/util/resource/monitor.go
@@ -0,0 +1,171 @@
+package resource
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+
+	"github.com/cortexproject/cortex/pkg/util/services"
+)
+
+const (
+	CPU  Type = "cpu"
+	Heap Type = "heap"
+
+	monitorInterval = 100 * time.Millisecond
+	dataPointsToAvg = 50
+)
+
+type Type string
+
+type IMonitor interface {
+	GetCPUUtilization() float64
+	GetHeapUtilization() float64
+}
+
+type Monitor struct {
+	services.Service
+
+	scanners       map[Type]scanner
+	containerLimit map[Type]float64
+	utilization    map[Type]float64
+
+	// Variables to calculate average CPU utilization
+	index         int
+	cpuRates      [dataPointsToAvg]float64
+	cpuIntervals  [dataPointsToAvg]float64
+	totalCPU      float64
+	totalInterval float64
+	lastCPU       float64
+	lastUpdate    time.Time
+
+	lock sync.RWMutex
+}
+
+func NewMonitor(limits map[Type]float64, registerer prometheus.Registerer) (*Monitor, error) {
+	m := &Monitor{
+		containerLimit: limits,
+		scanners:       make(map[Type]scanner),
+		// Initialize the utilization map up front; storeCPUUtilization and
+		// storeHeapUtilization write to it, and writing to a nil map panics.
+		utilization: make(map[Type]float64),
+
+		cpuRates:     [dataPointsToAvg]float64{},
+		cpuIntervals: [dataPointsToAvg]float64{},
+
+		lock: sync.RWMutex{},
+	}
+
+	m.Service = services.NewBasicService(nil, m.running, nil)
+
+	for resType, limit := range limits {
+		var scannerFunc func() (scanner, error)
+		var gaugeFunc func() float64
+
+		switch resType {
+		case CPU:
+			scannerFunc = newCPUScanner
+			gaugeFunc = m.GetCPUUtilization
+		case Heap:
+			scannerFunc = newHeapScanner
+			gaugeFunc = m.GetHeapUtilization
+		default:
+			return nil, fmt.Errorf("no scanner available for resource type: [%s]", resType)
+		}
+
+		s, err := scannerFunc()
+		if err != nil {
+			return nil, err
+		}
+		m.scanners[resType] = s
+		m.containerLimit[resType] = limit
+
+		promauto.With(registerer).NewGaugeFunc(prometheus.GaugeOpts{
+			Name:        "cortex_resource_utilization",
+			ConstLabels: map[string]string{"resource": string(resType)},
+		}, gaugeFunc)
+	}
+
+	return m, nil
+}
+
+func (m *Monitor) running(ctx context.Context) error {
+	ticker := time.NewTicker(monitorInterval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return nil
+
+		case <-ticker.C:
+			for resType, scanner := range m.scanners {
+				val, err := scanner.scan()
+				if err != nil {
+					// Wrap the underlying error instead of discarding it.
+					return fmt.Errorf("error scanning resource %s: %w", resType, err)
+				}
+
+				switch resType {
+				case CPU:
+					m.storeCPUUtilization(val)
+				case Heap:
+					m.storeHeapUtilization(val)
+				}
+			}
+		}
+	}
+}
+
+func (m *Monitor) storeCPUUtilization(cpuTime float64) {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	now := time.Now()
+
+	if m.lastUpdate.IsZero() {
+		m.lastCPU = cpuTime
+		m.lastUpdate = now
+		return
+	}
+
+	m.totalCPU -= m.cpuRates[m.index]
+	m.totalInterval -= m.cpuIntervals[m.index]
+
+	m.cpuRates[m.index] = cpuTime - m.lastCPU
+	m.cpuIntervals[m.index] = now.Sub(m.lastUpdate).Seconds()
+
+	m.totalCPU += m.cpuRates[m.index]
+	m.totalInterval += m.cpuIntervals[m.index]
+
+	m.lastCPU = cpuTime
+	m.lastUpdate = now
+	m.index = (m.index + 1) % dataPointsToAvg
+
+	if m.totalInterval > 0 && m.containerLimit[CPU] > 0 {
+		m.utilization[CPU] = m.totalCPU / m.totalInterval / m.containerLimit[CPU]
+	}
+}
+
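+// Editor's note, not part of the original patch: a worked example of the
+// sliding-window math above. With monitorInterval = 100ms and
+// dataPointsToAvg = 50, the window spans roughly the last five seconds. If
+// the process accumulated 2.5s of CPU time over that 5s window and
+// containerLimit[CPU] is 1 core, utilization = 2.5 / 5 / 1 = 0.5, i.e. the
+// process is using half of its CPU allowance.
+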
+func (m *Monitor) GetCPUUtilization() float64 {
+	m.lock.RLock()
+	defer m.lock.RUnlock()
+
+	return m.utilization[CPU]
+}
+
+func (m *Monitor) storeHeapUtilization(val float64) {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	if m.containerLimit[Heap] > 0 {
+		m.utilization[Heap] = val / m.containerLimit[Heap]
+	}
+}
+
+func (m *Monitor) GetHeapUtilization() float64 {
+	m.lock.RLock()
+	defer m.lock.RUnlock()
+
+	return m.utilization[Heap]
+}
diff --git a/pkg/util/resource/scanner.go b/pkg/util/resource/scanner.go
new file mode 100644
index 00000000000..1d4f0906c70
--- /dev/null
+++ b/pkg/util/resource/scanner.go
@@ -0,0 +1,43 @@
+package resource
+
+import (
+	"fmt"
+	"runtime/metrics"
+)
+
+const (
+	// runtime/metrics sample names are all lowercase.
+	heapMetricName = "/memory/classes/heap/objects:bytes"
+)
+
+type scanner interface {
+	scan() (float64, error)
+}
+
+type noopScanner struct{}
+
+func (s *noopScanner) scan() (float64, error) {
+	return 0, nil
+}
+
+type heapScanner struct {
+	metricSamples []metrics.Sample
+}
+
+func newHeapScanner() (scanner, error) {
+	metricSamples := make([]metrics.Sample, 1)
+	metricSamples[0].Name = heapMetricName
+	metrics.Read(metricSamples)
+
+	for _, sample := range metricSamples {
+		if sample.Value.Kind() == metrics.KindBad {
+			return nil, fmt.Errorf("metric %s is not supported", sample.Name)
+		}
+	}
+
+	return &heapScanner{metricSamples: metricSamples}, nil
+}
+
+func (s *heapScanner) scan() (float64, error) {
+	metrics.Read(s.metricSamples)
+	// The heap metric is a KindUint64 sample; calling Float64 on it panics.
+	return float64(s.metricSamples[0].Value.Uint64()), nil
+}
diff --git a/pkg/util/resource/scanner_darwin.go b/pkg/util/resource/scanner_darwin.go
new file mode 100644
index 00000000000..1d4aedfb555
--- /dev/null
+++ b/pkg/util/resource/scanner_darwin.go
@@ -0,0 +1,14 @@
+//go:build darwin
+
+package resource
+
+import (
+	"github.com/go-kit/log/level"
+
+	"github.com/cortexproject/cortex/pkg/util/log"
+)
+
+func newCPUScanner() (scanner, error) {
+	level.Warn(log.Logger).Log("msg", "CPU scanner not supported on darwin.")
+	return &noopScanner{}, nil
+}
diff --git a/pkg/util/resource/scanner_linux.go b/pkg/util/resource/scanner_linux.go
new file mode 100644
index 00000000000..7746706d432
--- /dev/null
+++ b/pkg/util/resource/scanner_linux.go
@@ -0,0 +1,30 @@
+//go:build linux
+
+package resource
+
+import (
+	"github.com/pkg/errors"
+	"github.com/prometheus/procfs"
+)
+
+type cpuScanner struct {
+	proc procfs.Proc
+}
+
+func newCPUScanner() (scanner, error) {
+	proc, err := procfs.Self()
+	if err != nil {
+		return nil, errors.Wrap(err, "error reading proc directory")
+	}
+
+	return &cpuScanner{proc: proc}, nil
+}
+
+func (s *cpuScanner) scan() (float64, error) {
+	stat, err := s.proc.Stat()
+	if err != nil {
+		return 0, err
+	}
+
+	return stat.CPUTime(), nil
+}
diff --git a/pkg/util/resource/scanner_test.go b/pkg/util/resource/scanner_test.go
new file mode 100644
index 00000000000..0e632d21e7f
--- /dev/null
+++ b/pkg/util/resource/scanner_test.go
@@ -0,0 +1,14 @@
+package resource
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func Test_NoopScanner(t *testing.T) {
+	s := noopScanner{}
+	val, err := s.scan()
+	require.NoError(t, err)
+	require.Zero(t, val)
+}
diff --git a/pkg/util/strings.go b/pkg/util/strings.go
index a61e622e101..4965dc52a5e 100644
--- a/pkg/util/strings.go
+++ b/pkg/util/strings.go
@@ -157,12 +157,21 @@ type Interner interface {
 
 // NewLruInterner returns a new Interner to be used to intern strings.
 // The interner will use a LRU cache to return the deduplicated strings
-func NewLruInterner() Interner {
+func NewLruInterner(enabled bool) Interner {
+	if !enabled {
+		return &noOpInterner{}
+	}
 	return &pool{
 		lru: expirable.NewLRU[string, string](maxInternerLruCacheSize, nil, internerLruCacheTTL),
 	}
 }
 
+type noOpInterner struct{}
+
+func (n noOpInterner) Intern(s string) string {
+	return s
+}
+
 type pool struct {
 	lru *expirable.LRU[string, string]
 }
diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go
index 152e51ff622..84596f17950 100644
--- a/pkg/util/validation/limits.go
+++ b/pkg/util/validation/limits.go
@@ -183,7 +183,7 @@ type Limits struct {
 
 	// Ruler defaults and limits.
 	RulerEvaluationDelay model.Duration `yaml:"ruler_evaluation_delay_duration" json:"ruler_evaluation_delay_duration"`
-	RulerTenantShardSize int `yaml:"ruler_tenant_shard_size" json:"ruler_tenant_shard_size"`
+	RulerTenantShardSize float64 `yaml:"ruler_tenant_shard_size" json:"ruler_tenant_shard_size"`
 	RulerMaxRulesPerRuleGroup int `yaml:"ruler_max_rules_per_rule_group" json:"ruler_max_rules_per_rule_group"`
 	RulerMaxRuleGroupsPerTenant int `yaml:"ruler_max_rule_groups_per_tenant" json:"ruler_max_rule_groups_per_tenant"`
 	RulerQueryOffset model.Duration `yaml:"ruler_query_offset" json:"ruler_query_offset"`
@@ -283,7 +283,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
 	f.IntVar(&l.MaxOutstandingPerTenant, "frontend.max-outstanding-requests-per-tenant", 100, "Maximum number of outstanding requests per tenant per request queue (either query frontend or query scheduler); requests beyond this error with HTTP 429.")
 
 	f.Var(&l.RulerEvaluationDelay, "ruler.evaluation-delay-duration", "Deprecated(use ruler.query-offset instead) and will be removed in v1.19.0: Duration to delay the evaluation of rules to ensure the underlying metrics have been pushed to Cortex.")
-	f.IntVar(&l.RulerTenantShardSize, "ruler.tenant-shard-size", 0, "The default tenant's shard size when the shuffle-sharding strategy is used by ruler. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant.")
+	f.Float64Var(&l.RulerTenantShardSize, "ruler.tenant-shard-size", 0, "The default tenant's shard size when the shuffle-sharding strategy is used by ruler. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant. If the value is > 0 and < 1, the shard size is that fraction of the total number of rulers.")
 	f.IntVar(&l.RulerMaxRulesPerRuleGroup, "ruler.max-rules-per-rule-group", 0, "Maximum number of rules per rule group per-tenant. 0 to disable.")
 	f.IntVar(&l.RulerMaxRuleGroupsPerTenant, "ruler.max-rule-groups-per-tenant", 0, "Maximum number of rule groups per-tenant. 0 to disable.")
 	f.Var(&l.RulerQueryOffset, "ruler.query-offset", "Duration to offset all rule evaluation queries per-tenant.")
@@ -838,7 +838,7 @@ func (o *Overrides) MetricRelabelConfigs(userID string) []*relabel.Config {
 }
 
 // RulerTenantShardSize returns shard size (number of rulers) used by this tenant when using shuffle-sharding strategy.
-func (o *Overrides) RulerTenantShardSize(userID string) int {
+func (o *Overrides) RulerTenantShardSize(userID string) float64 {
 	return o.GetOverridesForUser(userID).RulerTenantShardSize
 }
 
diff --git a/vendor/github.com/golang-jwt/jwt/v5/README.md b/vendor/github.com/golang-jwt/jwt/v5/README.md
index 964598a3173..0bb636f2224 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/README.md
+++ b/vendor/github.com/golang-jwt/jwt/v5/README.md
@@ -10,11 +10,11 @@ implementation of [JSON Web
 Tokens](https://datatracker.ietf.org/doc/html/rfc7519).
 
 Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0)
-this project adds Go module support, but maintains backwards compatibility with
+this project adds Go module support, but maintains backward compatibility with
 older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`. See the
 [`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information. Version
 v5.0.0 introduces major improvements to the validation of tokens, but is not
-entirely backwards compatible.
+entirely backward compatible.
 
 > After the original author of the library suggested migrating the maintenance
 > of `jwt-go`, a dedicated team of open source maintainers decided to clone the
@@ -24,7 +24,7 @@ entirely backwards compatible.
 
 **SECURITY NOTICE:** Some older versions of Go have a security issue in the
-crypto/elliptic. Recommendation is to upgrade to at least 1.15 See issue
+crypto/elliptic. The recommendation is to upgrade to at least 1.15. See issue
 [dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more
 detail.
 
@@ -32,7 +32,7 @@ detail.
 what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/).
 
 This library attempts to make it easy to do the right thing by requiring key
-types match the expected alg, but you should take the extra step to verify it in
+types to match the expected alg, but you should take the extra step to verify it in
 your usage. See the examples provided.
 
 ### Supported Go versions
@@ -41,7 +41,7 @@ Our support of Go versions is aligned with Go's
 [version release policy](https://golang.org/doc/devel/release#policy).
 So we will support a major version of Go until there are two newer major
 releases. We no longer support building jwt-go with unsupported Go versions, as
 these contain security
-vulnerabilities which will not be fixed.
+vulnerabilities that will not be fixed.
 
 ## What the heck is a JWT?
 
@@ -117,7 +117,7 @@ notable differences:
 
 This library is considered production ready. Feedback and feature requests are
 appreciated. The API should be considered stable. There should be very few
-backwards-incompatible changes outside of major version updates (and only with
+backward-incompatible changes outside of major version updates (and only with
 good reason).
 
 This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull
@@ -125,8 +125,8 @@ requests will land on `main`. Periodically, versions will be tagged from
 `main`. You can find all the releases on [the project releases
 page](https://github.com/golang-jwt/jwt/releases).
 
-**BREAKING CHANGES:*** A full list of breaking changes is available in
-`VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating
+**BREAKING CHANGES:** A full list of breaking changes is available in
+`VERSION_HISTORY.md`. See [`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information on updating
 your code.
 
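(Editor's addition, not part of the vendored README: the advice above about verifying that the key type matches the expected `alg` can be illustrated with a short sketch. `tokenString` and `hmacSecret` are assumed inputs.)

```go
// Reject tokens whose header advertises an unexpected algorithm: restrict the
// parser to HS256 and double-check the method type inside the Keyfunc.
token, err := jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
	if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
		return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
	}
	return hmacSecret, nil // hmacSecret is an assumed []byte key
}, jwt.WithValidMethods([]string{"HS256"}))
```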
 ## Extensions
 
diff --git a/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md b/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md
index b08402c3427..2740597f179 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md
+++ b/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md
@@ -2,11 +2,11 @@
 
 ## Supported Versions
 
-As of February 2022 (and until this document is updated), the latest version `v4` is supported.
+As of November 2024 (and until this document is updated), the latest version `v5` is supported.
 In critical cases, we might supply back-ported patches for `v4`.
 
 ## Reporting a Vulnerability
 
-If you think you found a vulnerability, and even if you are not sure, please report it to jwt-go-security@googlegroups.com or one of the other [golang-jwt maintainers](https://github.com/orgs/golang-jwt/people). Please try be explicit, describe steps to reproduce the security issue with code example(s).
+If you think you found a vulnerability, and even if you are not sure, please report it via a [GitHub Security Advisory](https://github.com/golang-jwt/jwt/security/advisories/new). Please try to be explicit and describe steps to reproduce the security issue with code example(s).
 
 You will receive a response within a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem.
 
diff --git a/vendor/github.com/golang-jwt/jwt/v5/parser.go b/vendor/github.com/golang-jwt/jwt/v5/parser.go
index ecf99af78f9..054c7eb6ff5 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/parser.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/parser.go
@@ -8,6 +8,8 @@ import (
 	"strings"
 )
 
+const tokenDelimiter = "."
+
 type Parser struct {
 	// If populated, only these methods will be considered valid.
 	validMethods []string
@@ -136,9 +138,10 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
 // It's only ever useful in cases where you know the signature is valid (since it has already
 // been or will be checked elsewhere in the stack) and you want to extract values from it.
 func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
-	parts = strings.Split(tokenString, ".")
-	if len(parts) != 3 {
-		return nil, parts, newError("token contains an invalid number of segments", ErrTokenMalformed)
+	var ok bool
+	parts, ok = splitToken(tokenString)
+	if !ok {
+		return nil, nil, newError("token contains an invalid number of segments", ErrTokenMalformed)
 	}
 
 	token = &Token{Raw: tokenString}
@@ -196,6 +199,33 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke
 	return token, parts, nil
 }
 
+// splitToken splits a token string into three parts: header, claims, and signature. It will only
+// return true if the token contains exactly two delimiters and three parts. In all other cases, it
+// will return nil parts and false.
+func splitToken(token string) ([]string, bool) {
+	parts := make([]string, 3)
+	header, remain, ok := strings.Cut(token, tokenDelimiter)
+	if !ok {
+		return nil, false
+	}
+	parts[0] = header
+	claims, remain, ok := strings.Cut(remain, tokenDelimiter)
+	if !ok {
+		return nil, false
+	}
+	parts[1] = claims
+	// One more cut to ensure the signature is the last part of the token and there are no more
+	// delimiters. This avoids an issue where malicious input could contain additional delimiters
+	// causing unnecessary overhead parsing tokens.
+ signature, _, unexpected := strings.Cut(remain, tokenDelimiter) + if unexpected { + return nil, false + } + parts[2] = signature + + return parts, true +} + // DecodeSegment decodes a JWT specific base64url encoding. This function will // take into account whether the [Parser] is configured with additional options, // such as [WithStrictDecoding] or [WithPaddingAllowed]. diff --git a/vendor/github.com/golang-jwt/jwt/v5/token.go b/vendor/github.com/golang-jwt/jwt/v5/token.go index 352873a2d9c..9c7f4ab010c 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/token.go +++ b/vendor/github.com/golang-jwt/jwt/v5/token.go @@ -75,7 +75,7 @@ func (t *Token) SignedString(key interface{}) (string, error) { } // SigningString generates the signing string. This is the most expensive part -// of the whole deal. Unless you need this for something special, just go +// of the whole deal. Unless you need this for something special, just go // straight for the SignedString. func (t *Token) SigningString() (string, error) { h, err := json.Marshal(t.Header) diff --git a/vendor/go.uber.org/automaxprocs/.codecov.yml b/vendor/go.uber.org/automaxprocs/.codecov.yml new file mode 100644 index 00000000000..9a2ed4a9969 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/.codecov.yml @@ -0,0 +1,14 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 90% # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure diff --git a/vendor/go.uber.org/automaxprocs/.gitignore b/vendor/go.uber.org/automaxprocs/.gitignore new file mode 100644 index 00000000000..dd7bcf5130b --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/.gitignore @@ -0,0 +1,33 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +vendor + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.pprof +*.out +*.log +coverage.txt + +/bin +cover.out +cover.html diff --git a/vendor/go.uber.org/automaxprocs/CHANGELOG.md b/vendor/go.uber.org/automaxprocs/CHANGELOG.md new file mode 100644 index 00000000000..f421056ae82 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/CHANGELOG.md @@ -0,0 +1,52 @@ +# Changelog + +## v1.6.0 (2024-07-24) + +- Add RoundQuotaFunc option that allows configuration of rounding + behavior for floating point CPU quota. + +## v1.5.3 (2023-07-19) + +- Fix mountinfo parsing when super options have fields with spaces. +- Fix division by zero while parsing cgroups. + +## v1.5.2 (2023-03-16) + +- Support child control cgroups +- Fix file descriptor leak +- Update dependencies + +## v1.5.1 (2022-04-06) + +- Fix cgroups v2 mountpoint detection. + +## v1.5.0 (2022-04-05) + +- Add support for cgroups v2. + +Thanks to @emadolsky for their contribution to this release. + +## v1.4.0 (2021-02-01) + +- Support colons in cgroup names. +- Remove linters from runtime dependencies. + +## v1.3.0 (2020-01-23) + +- Migrate to Go modules. 
+ +## v1.2.0 (2018-02-22) + +- Fixed quota clamping to always round down rather than up; Rather than + guaranteeing constant throttling at saturation, instead assume that the + fractional CPU was added as a hedge for factors outside of Go's scheduler. + +## v1.1.0 (2017-11-10) + +- Log the new value of `GOMAXPROCS` rather than the current value. +- Make logs more explicit about whether `GOMAXPROCS` was modified or not. +- Allow customization of the minimum `GOMAXPROCS`, and modify default from 2 to 1. + +## v1.0.0 (2017-08-09) + +- Initial release. diff --git a/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md b/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..e327d9aa5cd --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md @@ -0,0 +1,75 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, +body size, disability, ethnicity, gender identity and expression, level of +experience, nationality, personal appearance, race, religion, or sexual +identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an +appointed representative at an online or offline event. Representation of a +project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at oss-conduct@uber.com. The project +team will review and investigate all complaints, and will respond in a way +that it deems appropriate to the circumstances. The project team is obligated +to maintain confidentiality with regard to the reporter of an incident. 
+Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.4, available at +[http://contributor-covenant.org/version/1/4][version]. + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md b/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md new file mode 100644 index 00000000000..2b6a6040d78 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md @@ -0,0 +1,81 @@ +# Contributing + +We'd love your help improving this package! + +If you'd like to add new exported APIs, please [open an issue][open-issue] +describing your proposal — discussing API changes ahead of time makes +pull request review much smoother. In your issue, pull request, and any other +communications, please remember to treat your fellow contributors with +respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously. + +Note that you'll need to sign [Uber's Contributor License Agreement][cla] +before we can accept any of your contributions. If necessary, a bot will remind +you to accept the CLA when you open your pull request. + +## Setup + +[Fork][fork], then clone the repository: + +``` +mkdir -p $GOPATH/src/go.uber.org +cd $GOPATH/src/go.uber.org +git clone git@github.com:your_github_username/automaxprocs.git +cd automaxprocs +git remote add upstream https://github.com/uber-go/automaxprocs.git +git fetch upstream +``` + +Install the test dependencies: + +``` +make dependencies +``` + +Make sure that the tests and the linters pass: + +``` +make test +make lint +``` + +If you're not using the minor version of Go specified in the Makefile's +`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is +fine, but it means that you'll only discover lint failures after you open your +pull request. + +## Making Changes + +Start by creating a new branch for your changes: + +``` +cd $GOPATH/src/go.uber.org/automaxprocs +git checkout master +git fetch upstream +git rebase upstream/master +git checkout -b cool_new_feature +``` + +Make your changes, then ensure that `make lint` and `make test` still pass. If +you're satisfied with your changes, push them to your fork. + +``` +git push origin cool_new_feature +``` + +Then use the GitHub UI to open a pull request. + +At this point, you're waiting on us to review your changes. We *try* to respond +to issues and pull requests within a few business days, and we may suggest some +improvements or alternatives. Once your changes are approved, one of the +project maintainers will merge them. + +We're much more likely to approve your changes if you: + +* Add tests for new functionality. +* Write a [good commit message][commit-message]. +* Maintain backward compatibility. 
+ +[fork]: https://github.com/uber-go/automaxprocs/fork +[open-issue]: https://github.com/uber-go/automaxprocs/issues/new +[cla]: https://cla-assistant.io/uber-go/automaxprocs +[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html diff --git a/vendor/go.uber.org/automaxprocs/LICENSE b/vendor/go.uber.org/automaxprocs/LICENSE new file mode 100644 index 00000000000..20dcf51d96d --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/vendor/go.uber.org/automaxprocs/Makefile b/vendor/go.uber.org/automaxprocs/Makefile new file mode 100644 index 00000000000..1642b714801 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/Makefile @@ -0,0 +1,46 @@ +export GOBIN ?= $(shell pwd)/bin + +GO_FILES := $(shell \ + find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ + -o -name '*.go' -print | cut -b3-) + +GOLINT = $(GOBIN)/golint +STATICCHECK = $(GOBIN)/staticcheck + +.PHONY: build +build: + go build ./... + +.PHONY: install +install: + go mod download + +.PHONY: test +test: + go test -race ./... + +.PHONY: cover +cover: + go test -coverprofile=cover.out -covermode=atomic -coverpkg=./... ./... + go tool cover -html=cover.out -o cover.html + +$(GOLINT): tools/go.mod + cd tools && go install golang.org/x/lint/golint + +$(STATICCHECK): tools/go.mod + cd tools && go install honnef.co/go/tools/cmd/staticcheck@2023.1.2 + +.PHONY: lint +lint: $(GOLINT) $(STATICCHECK) + @rm -rf lint.log + @echo "Checking gofmt" + @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log + @echo "Checking go vet" + @go vet ./... 2>&1 | tee -a lint.log + @echo "Checking golint" + @$(GOLINT) ./... | tee -a lint.log + @echo "Checking staticcheck" + @$(STATICCHECK) ./... 2>&1 | tee -a lint.log + @echo "Checking for license headers..." + @./.build/check_license.sh | tee -a lint.log + @[ ! -s lint.log ] diff --git a/vendor/go.uber.org/automaxprocs/README.md b/vendor/go.uber.org/automaxprocs/README.md new file mode 100644 index 00000000000..bfed32adae8 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/README.md @@ -0,0 +1,71 @@ +# automaxprocs [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +Automatically set `GOMAXPROCS` to match Linux container CPU quota. + +## Installation + +`go get -u go.uber.org/automaxprocs` + +## Quick Start + +```go +import _ "go.uber.org/automaxprocs" + +func main() { + // Your application logic here. 
+} +``` + +# Performance +Data measured from Uber's internal load balancer. We ran the load balancer with 200% CPU quota (i.e., 2 cores): + +| GOMAXPROCS | RPS | P50 (ms) | P99.9 (ms) | +| ------------------ | --------- | -------- | ---------- | +| 1 | 28,893.18 | 1.46 | 19.70 | +| 2 (equal to quota) | 44,715.07 | 0.84 | 26.38 | +| 3 | 44,212.93 | 0.66 | 30.07 | +| 4 | 41,071.15 | 0.57 | 42.94 | +| 8 | 33,111.69 | 0.43 | 64.32 | +| Default (24) | 22,191.40 | 0.45 | 76.19 | + +When `GOMAXPROCS` is increased above the CPU quota, we see P50 decrease slightly, but see significant increases to P99. We also see that the total RPS handled also decreases. + +When `GOMAXPROCS` is higher than the CPU quota allocated, we also saw significant throttling: + +``` +$ cat /sys/fs/cgroup/cpu,cpuacct/system.slice/[...]/cpu.stat +nr_periods 42227334 +nr_throttled 131923 +throttled_time 88613212216618 +``` + +Once `GOMAXPROCS` was reduced to match the CPU quota, we saw no CPU throttling. + +## Development Status: Stable + +All APIs are finalized, and no breaking changes will be made in the 1.x series +of releases. Users of semver-aware dependency management systems should pin +automaxprocs to `^1`. + +## Contributing + +We encourage and support an active, healthy community of contributors — +including you! Details are in the [contribution guide](CONTRIBUTING.md) and +the [code of conduct](CODE_OF_CONDUCT.md). The automaxprocs maintainers keep +an eye on issues and pull requests, but you can also report any negative +conduct to oss-conduct@uber.com. That email list is a private, safe space; +even the automaxprocs maintainers don't have access, so don't hesitate to hold +us to a high standard. + +
+ +Released under the [MIT License](LICENSE). + +[doc-img]: https://godoc.org/go.uber.org/automaxprocs?status.svg +[doc]: https://godoc.org/go.uber.org/automaxprocs +[ci-img]: https://github.com/uber-go/automaxprocs/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/automaxprocs/actions/workflows/go.yml +[cov-img]: https://codecov.io/gh/uber-go/automaxprocs/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/automaxprocs + + diff --git a/vendor/go.uber.org/automaxprocs/automaxprocs.go b/vendor/go.uber.org/automaxprocs/automaxprocs.go new file mode 100644 index 00000000000..69946a3e1fd --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/automaxprocs.go @@ -0,0 +1,33 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package automaxprocs automatically sets GOMAXPROCS to match the Linux +// container CPU quota, if any. +package automaxprocs // import "go.uber.org/automaxprocs" + +import ( + "log" + + "go.uber.org/automaxprocs/maxprocs" +) + +func init() { + maxprocs.Set(maxprocs.Logger(log.Printf)) +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go new file mode 100644 index 00000000000..fe4ecf561e2 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go @@ -0,0 +1,79 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package cgroups + +import ( + "bufio" + "io" + "os" + "path/filepath" + "strconv" +) + +// CGroup represents the data structure for a Linux control group. +type CGroup struct { + path string +} + +// NewCGroup returns a new *CGroup from a given path. +func NewCGroup(path string) *CGroup { + return &CGroup{path: path} +} + +// Path returns the path of the CGroup*. +func (cg *CGroup) Path() string { + return cg.path +} + +// ParamPath returns the path of the given cgroup param under itself. +func (cg *CGroup) ParamPath(param string) string { + return filepath.Join(cg.path, param) +} + +// readFirstLine reads the first line from a cgroup param file. +func (cg *CGroup) readFirstLine(param string) (string, error) { + paramFile, err := os.Open(cg.ParamPath(param)) + if err != nil { + return "", err + } + defer paramFile.Close() + + scanner := bufio.NewScanner(paramFile) + if scanner.Scan() { + return scanner.Text(), nil + } + if err := scanner.Err(); err != nil { + return "", err + } + return "", io.ErrUnexpectedEOF +} + +// readInt parses the first line from a cgroup param file as int. +func (cg *CGroup) readInt(param string) (int, error) { + text, err := cg.readFirstLine(param) + if err != nil { + return 0, err + } + return strconv.Atoi(text) +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go new file mode 100644 index 00000000000..e89f5436028 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go @@ -0,0 +1,118 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package cgroups + +const ( + // _cgroupFSType is the Linux CGroup file system type used in + // `/proc/$PID/mountinfo`. + _cgroupFSType = "cgroup" + // _cgroupSubsysCPU is the CPU CGroup subsystem. + _cgroupSubsysCPU = "cpu" + // _cgroupSubsysCPUAcct is the CPU accounting CGroup subsystem. + _cgroupSubsysCPUAcct = "cpuacct" + // _cgroupSubsysCPUSet is the CPUSet CGroup subsystem. + _cgroupSubsysCPUSet = "cpuset" + // _cgroupSubsysMemory is the Memory CGroup subsystem. 
+ _cgroupSubsysMemory = "memory" + + // _cgroupCPUCFSQuotaUsParam is the file name for the CGroup CFS quota + // parameter. + _cgroupCPUCFSQuotaUsParam = "cpu.cfs_quota_us" + // _cgroupCPUCFSPeriodUsParam is the file name for the CGroup CFS period + // parameter. + _cgroupCPUCFSPeriodUsParam = "cpu.cfs_period_us" +) + +const ( + _procPathCGroup = "/proc/self/cgroup" + _procPathMountInfo = "/proc/self/mountinfo" +) + +// CGroups is a map that associates each CGroup with its subsystem name. +type CGroups map[string]*CGroup + +// NewCGroups returns a new *CGroups from given `mountinfo` and `cgroup` files +// under for some process under `/proc` file system (see also proc(5) for more +// information). +func NewCGroups(procPathMountInfo, procPathCGroup string) (CGroups, error) { + cgroupSubsystems, err := parseCGroupSubsystems(procPathCGroup) + if err != nil { + return nil, err + } + + cgroups := make(CGroups) + newMountPoint := func(mp *MountPoint) error { + if mp.FSType != _cgroupFSType { + return nil + } + + for _, opt := range mp.SuperOptions { + subsys, exists := cgroupSubsystems[opt] + if !exists { + continue + } + + cgroupPath, err := mp.Translate(subsys.Name) + if err != nil { + return err + } + cgroups[opt] = NewCGroup(cgroupPath) + } + + return nil + } + + if err := parseMountInfo(procPathMountInfo, newMountPoint); err != nil { + return nil, err + } + return cgroups, nil +} + +// NewCGroupsForCurrentProcess returns a new *CGroups instance for the current +// process. +func NewCGroupsForCurrentProcess() (CGroups, error) { + return NewCGroups(_procPathMountInfo, _procPathCGroup) +} + +// CPUQuota returns the CPU quota applied with the CPU cgroup controller. +// It is a result of `cpu.cfs_quota_us / cpu.cfs_period_us`. If the value of +// `cpu.cfs_quota_us` was not set (-1), the method returns `(-1, nil)`. +func (cg CGroups) CPUQuota() (float64, bool, error) { + cpuCGroup, exists := cg[_cgroupSubsysCPU] + if !exists { + return -1, false, nil + } + + cfsQuotaUs, err := cpuCGroup.readInt(_cgroupCPUCFSQuotaUsParam) + if defined := cfsQuotaUs > 0; err != nil || !defined { + return -1, defined, err + } + + cfsPeriodUs, err := cpuCGroup.readInt(_cgroupCPUCFSPeriodUsParam) + if defined := cfsPeriodUs > 0; err != nil || !defined { + return -1, defined, err + } + + return float64(cfsQuotaUs) / float64(cfsPeriodUs), true, nil +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go new file mode 100644 index 00000000000..78556062fe2 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go @@ -0,0 +1,176 @@ +// Copyright (c) 2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package cgroups + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "path" + "strconv" + "strings" +) + +const ( + // _cgroupv2CPUMax is the file name for the CGroup-V2 CPU max and period + // parameter. + _cgroupv2CPUMax = "cpu.max" + // _cgroupv2FSType is the Linux CGroup-V2 file system type used in + // `/proc/$PID/mountinfo`. + _cgroupv2FSType = "cgroup2" + + _cgroupv2MountPoint = "/sys/fs/cgroup" + + _cgroupV2CPUMaxDefaultPeriod = 100000 + _cgroupV2CPUMaxQuotaMax = "max" +) + +const ( + _cgroupv2CPUMaxQuotaIndex = iota + _cgroupv2CPUMaxPeriodIndex +) + +// ErrNotV2 indicates that the system is not using cgroups2. +var ErrNotV2 = errors.New("not using cgroups2") + +// CGroups2 provides access to cgroups data for systems using cgroups2. +type CGroups2 struct { + mountPoint string + groupPath string + cpuMaxFile string +} + +// NewCGroups2ForCurrentProcess builds a CGroups2 for the current process. +// +// This returns ErrNotV2 if the system is not using cgroups2. +func NewCGroups2ForCurrentProcess() (*CGroups2, error) { + return newCGroups2From(_procPathMountInfo, _procPathCGroup) +} + +func newCGroups2From(mountInfoPath, procPathCGroup string) (*CGroups2, error) { + isV2, err := isCGroupV2(mountInfoPath) + if err != nil { + return nil, err + } + + if !isV2 { + return nil, ErrNotV2 + } + + subsystems, err := parseCGroupSubsystems(procPathCGroup) + if err != nil { + return nil, err + } + + // Find v2 subsystem by looking for the `0` id + var v2subsys *CGroupSubsys + for _, subsys := range subsystems { + if subsys.ID == 0 { + v2subsys = subsys + break + } + } + + if v2subsys == nil { + return nil, ErrNotV2 + } + + return &CGroups2{ + mountPoint: _cgroupv2MountPoint, + groupPath: v2subsys.Name, + cpuMaxFile: _cgroupv2CPUMax, + }, nil +} + +func isCGroupV2(procPathMountInfo string) (bool, error) { + var ( + isV2 bool + newMountPoint = func(mp *MountPoint) error { + isV2 = isV2 || (mp.FSType == _cgroupv2FSType && mp.MountPoint == _cgroupv2MountPoint) + return nil + } + ) + + if err := parseMountInfo(procPathMountInfo, newMountPoint); err != nil { + return false, err + } + + return isV2, nil +} + +// CPUQuota returns the CPU quota applied with the CPU cgroup2 controller. +// It is the result of reading the CPU quota and period from the cpu.max file: +// it returns `quota / period`.
If cpu.max is set to max, it returns +// (-1, false, nil) +func (cg *CGroups2) CPUQuota() (float64, bool, error) { + cpuMaxParams, err := os.Open(path.Join(cg.mountPoint, cg.groupPath, cg.cpuMaxFile)) + if err != nil { + if os.IsNotExist(err) { + return -1, false, nil + } + return -1, false, err + } + defer cpuMaxParams.Close() + + scanner := bufio.NewScanner(cpuMaxParams) + if scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if len(fields) == 0 || len(fields) > 2 { + return -1, false, fmt.Errorf("invalid format") + } + + if fields[_cgroupv2CPUMaxQuotaIndex] == _cgroupV2CPUMaxQuotaMax { + return -1, false, nil + } + + max, err := strconv.Atoi(fields[_cgroupv2CPUMaxQuotaIndex]) + if err != nil { + return -1, false, err + } + + var period int + if len(fields) == 1 { + period = _cgroupV2CPUMaxDefaultPeriod + } else { + period, err = strconv.Atoi(fields[_cgroupv2CPUMaxPeriodIndex]) + if err != nil { + return -1, false, err + } + + if period == 0 { + return -1, false, errors.New("zero value for period is not allowed") + } + } + + return float64(max) / float64(period), true, nil + } + + if err := scanner.Err(); err != nil { + return -1, false, err + } + + return 0, false, io.ErrUnexpectedEOF +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go new file mode 100644 index 00000000000..113555f63da --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go @@ -0,0 +1,23 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package cgroups provides utilities to access Linux control group (CGroups) +// parameters (CPU quota, for example) for a given process. +package cgroups diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go new file mode 100644 index 00000000000..94ac75a46e8 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go @@ -0,0 +1,52 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
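The cpu.max format handled above is a single line, `$MAX $PERIOD`, where `$MAX` is either an integer or the literal `max`, and the period may be omitted (defaulting to 100000 microseconds). A small self-contained sketch of that parse, separate from the vendored implementation:

package main

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
)

// parseCPUMax parses one cpu.max line and returns the quota as a fraction of
// a CPU, plus false when no limit is configured ("max").
func parseCPUMax(line string) (quota float64, defined bool, err error) {
	fields := strings.Fields(line)
	if len(fields) == 0 || len(fields) > 2 {
		return -1, false, fmt.Errorf("invalid cpu.max format: %q", line)
	}
	if fields[0] == "max" {
		return -1, false, nil // no limit configured
	}
	max, err := strconv.Atoi(fields[0])
	if err != nil {
		return -1, false, err
	}
	period := 100000 // default period in microseconds
	if len(fields) == 2 {
		if period, err = strconv.Atoi(fields[1]); err != nil {
			return -1, false, err
		}
		if period == 0 {
			return -1, false, errors.New("zero period is not allowed")
		}
	}
	return float64(max) / float64(period), true, nil
}

func main() {
	for _, line := range []string{"max 100000", "150000 100000", "50000"} {
		quota, defined, err := parseCPUMax(line)
		fmt.Printf("%-14s => quota=%v defined=%v err=%v\n", line, quota, defined, err)
	}
}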
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package cgroups + +import "fmt" + +type cgroupSubsysFormatInvalidError struct { + line string +} + +type mountPointFormatInvalidError struct { + line string +} + +type pathNotExposedFromMountPointError struct { + mountPoint string + root string + path string +} + +func (err cgroupSubsysFormatInvalidError) Error() string { + return fmt.Sprintf("invalid format for CGroupSubsys: %q", err.line) +} + +func (err mountPointFormatInvalidError) Error() string { + return fmt.Sprintf("invalid format for MountPoint: %q", err.line) +} + +func (err pathNotExposedFromMountPointError) Error() string { + return fmt.Sprintf("path %q is not a descendant of mount point root %q and cannot be exposed from %q", err.path, err.root, err.mountPoint) +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go new file mode 100644 index 00000000000..f3877f78aa6 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go @@ -0,0 +1,171 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +//go:build linux +// +build linux + +package cgroups + +import ( + "bufio" + "os" + "path/filepath" + "strconv" + "strings" +) + +const ( + _mountInfoSep = " " + _mountInfoOptsSep = "," + _mountInfoOptionalFieldsSep = "-" +) + +const ( + _miFieldIDMountID = iota + _miFieldIDParentID + _miFieldIDDeviceID + _miFieldIDRoot + _miFieldIDMountPoint + _miFieldIDOptions + _miFieldIDOptionalFields + + _miFieldCountFirstHalf +) + +const ( + _miFieldOffsetFSType = iota + _miFieldOffsetMountSource + _miFieldOffsetSuperOptions + + _miFieldCountSecondHalf +) + +const _miFieldCountMin = _miFieldCountFirstHalf + _miFieldCountSecondHalf + +// MountPoint is the data structure for the mount points in +// `/proc/$PID/mountinfo`. See also proc(5) for more information. +type MountPoint struct { + MountID int + ParentID int + DeviceID string + Root string + MountPoint string + Options []string + OptionalFields []string + FSType string + MountSource string + SuperOptions []string +} + +// NewMountPointFromLine parses a line read from `/proc/$PID/mountinfo` and +// returns a new *MountPoint. +func NewMountPointFromLine(line string) (*MountPoint, error) { + fields := strings.Split(line, _mountInfoSep) + + if len(fields) < _miFieldCountMin { + return nil, mountPointFormatInvalidError{line} + } + + mountID, err := strconv.Atoi(fields[_miFieldIDMountID]) + if err != nil { + return nil, err + } + + parentID, err := strconv.Atoi(fields[_miFieldIDParentID]) + if err != nil { + return nil, err + } + + for i, field := range fields[_miFieldIDOptionalFields:] { + if field == _mountInfoOptionalFieldsSep { + // End of optional fields. + fsTypeStart := _miFieldIDOptionalFields + i + 1 + + // Now we know where the optional fields end, split the line again with a + // limit to avoid issues with spaces in super options as present on WSL. + fields = strings.SplitN(line, _mountInfoSep, fsTypeStart+_miFieldCountSecondHalf) + if len(fields) != fsTypeStart+_miFieldCountSecondHalf { + return nil, mountPointFormatInvalidError{line} + } + + miFieldIDFSType := _miFieldOffsetFSType + fsTypeStart + miFieldIDMountSource := _miFieldOffsetMountSource + fsTypeStart + miFieldIDSuperOptions := _miFieldOffsetSuperOptions + fsTypeStart + + return &MountPoint{ + MountID: mountID, + ParentID: parentID, + DeviceID: fields[_miFieldIDDeviceID], + Root: fields[_miFieldIDRoot], + MountPoint: fields[_miFieldIDMountPoint], + Options: strings.Split(fields[_miFieldIDOptions], _mountInfoOptsSep), + OptionalFields: fields[_miFieldIDOptionalFields:(fsTypeStart - 1)], + FSType: fields[miFieldIDFSType], + MountSource: fields[miFieldIDMountSource], + SuperOptions: strings.Split(fields[miFieldIDSuperOptions], _mountInfoOptsSep), + }, nil + } + } + + return nil, mountPointFormatInvalidError{line} +} + +// Translate converts an absolute path inside the *MountPoint's file system to +// the host file system path in the mount namespace the *MountPoint belongs to. +func (mp *MountPoint) Translate(absPath string) (string, error) { + relPath, err := filepath.Rel(mp.Root, absPath) + + if err != nil { + return "", err + } + if relPath == ".." || strings.HasPrefix(relPath, "../") { + return "", pathNotExposedFromMountPointError{ + mountPoint: mp.MountPoint, + root: mp.Root, + path: absPath, + } + } + + return filepath.Join(mp.MountPoint, relPath), nil +} + +// parseMountInfo parses procPathMountInfo (usually at `/proc/$PID/mountinfo`) +// and yields parsed *MountPoint into newMountPoint. 
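As a concrete illustration of the mountinfo layout parsed here (fields per proc(5): mount ID, parent ID, device ID, root, mount point, options, optional fields terminated by `-`, then FS type, mount source, and super options), a hypothetical cgroup v1 line and a Translate-style path resolution against it:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	// Illustrative /proc/self/mountinfo entry for a cgroup v1 controller:
	line := "25 20 0:21 /docker/abc123 /sys/fs/cgroup/cpu rw,nosuid - cgroup cgroup rw,cpu,cpuacct"
	fields := strings.Split(line, " ")
	root, mountPoint := fields[3], fields[4]

	// A path inside the mount's root maps onto the mount point in this
	// namespace, which is what Translate does above.
	rel, err := filepath.Rel(root, "/docker/abc123/nested")
	if err != nil {
		panic(err)
	}
	fmt.Println(filepath.Join(mountPoint, rel)) // /sys/fs/cgroup/cpu/nested
}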
+func parseMountInfo(procPathMountInfo string, newMountPoint func(*MountPoint) error) error { + mountInfoFile, err := os.Open(procPathMountInfo) + if err != nil { + return err + } + defer mountInfoFile.Close() + + scanner := bufio.NewScanner(mountInfoFile) + + for scanner.Scan() { + mountPoint, err := NewMountPointFromLine(scanner.Text()) + if err != nil { + return err + } + if err := newMountPoint(mountPoint); err != nil { + return err + } + } + + return scanner.Err() +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go new file mode 100644 index 00000000000..cddc3eaec39 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go @@ -0,0 +1,103 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package cgroups + +import ( + "bufio" + "os" + "strconv" + "strings" +) + +const ( + _cgroupSep = ":" + _cgroupSubsysSep = "," +) + +const ( + _csFieldIDID = iota + _csFieldIDSubsystems + _csFieldIDName + _csFieldCount +) + +// CGroupSubsys represents the data structure for entities in +// `/proc/$PID/cgroup`. See also proc(5) for more information. +type CGroupSubsys struct { + ID int + Subsystems []string + Name string +} + +// NewCGroupSubsysFromLine returns a new *CGroupSubsys by parsing a string in +// the format of `/proc/$PID/cgroup` +func NewCGroupSubsysFromLine(line string) (*CGroupSubsys, error) { + fields := strings.SplitN(line, _cgroupSep, _csFieldCount) + + if len(fields) != _csFieldCount { + return nil, cgroupSubsysFormatInvalidError{line} + } + + id, err := strconv.Atoi(fields[_csFieldIDID]) + if err != nil { + return nil, err + } + + cgroup := &CGroupSubsys{ + ID: id, + Subsystems: strings.Split(fields[_csFieldIDSubsystems], _cgroupSubsysSep), + Name: fields[_csFieldIDName], + } + + return cgroup, nil +} + +// parseCGroupSubsystems parses procPathCGroup (usually at `/proc/$PID/cgroup`) +// and returns a new map[string]*CGroupSubsys. 
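For reference, the `/proc/$PID/cgroup` format parsed by the subsys code is `ID:subsys1,subsys2:name`; on a pure cgroups2 host there is a single entry with ID 0 and an empty subsystem list. A tiny illustrative parse, assuming that same colon-separated layout:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Illustrative /proc/self/cgroup lines.
	lines := []string{
		"3:cpu,cpuacct:/docker/abc123", // cgroups v1 entry
		"0::/user.slice",               // the single cgroups v2 entry (ID 0)
	}
	for _, line := range lines {
		// SplitN with limit 3 keeps any ':' inside the group name intact.
		fields := strings.SplitN(line, ":", 3)
		fmt.Printf("id=%s subsystems=%q name=%s\n",
			fields[0], strings.Split(fields[1], ","), fields[2])
	}
}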
+func parseCGroupSubsystems(procPathCGroup string) (map[string]*CGroupSubsys, error) { + cgroupFile, err := os.Open(procPathCGroup) + if err != nil { + return nil, err + } + defer cgroupFile.Close() + + scanner := bufio.NewScanner(cgroupFile) + subsystems := make(map[string]*CGroupSubsys) + + for scanner.Scan() { + cgroup, err := NewCGroupSubsysFromLine(scanner.Text()) + if err != nil { + return nil, err + } + for _, subsys := range cgroup.Subsystems { + subsystems[subsys] = cgroup + } + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + return subsystems, nil +} diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go new file mode 100644 index 00000000000..f9057fd2731 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go @@ -0,0 +1,75 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package runtime + +import ( + "errors" + + cg "go.uber.org/automaxprocs/internal/cgroups" +) + +// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process +// to a valid GOMAXPROCS value. The quota is converted from float to int using round. +// If round == nil, DefaultRoundFunc is used. 
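The conversion described in that comment is just: round the fractional quota (floor by default), then clamp to the configured minimum. A minimal sketch of the same arithmetic, independent of the internal types:

package main

import (
	"fmt"
	"math"
)

// quotaToMaxProcs floors the fractional CPU quota (the DefaultRoundFunc
// behavior) and clamps the result to minValue.
func quotaToMaxProcs(quota float64, minValue int) int {
	procs := int(math.Floor(quota))
	if procs < minValue {
		return minValue
	}
	return procs
}

func main() {
	fmt.Println(quotaToMaxProcs(2.5, 1)) // 2
	fmt.Println(quotaToMaxProcs(0.5, 1)) // 1, the CPUQuotaMinUsed case
}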
+func CPUQuotaToGOMAXPROCS(minValue int, round func(v float64) int) (int, CPUQuotaStatus, error) { + if round == nil { + round = DefaultRoundFunc + } + cgroups, err := _newQueryer() + if err != nil { + return -1, CPUQuotaUndefined, err + } + + quota, defined, err := cgroups.CPUQuota() + if !defined || err != nil { + return -1, CPUQuotaUndefined, err + } + + maxProcs := round(quota) + if minValue > 0 && maxProcs < minValue { + return minValue, CPUQuotaMinUsed, nil + } + return maxProcs, CPUQuotaUsed, nil +} + +type queryer interface { + CPUQuota() (float64, bool, error) +} + +var ( + _newCgroups2 = cg.NewCGroups2ForCurrentProcess + _newCgroups = cg.NewCGroupsForCurrentProcess + _newQueryer = newQueryer +) + +func newQueryer() (queryer, error) { + cgroups, err := _newCgroups2() + if err == nil { + return cgroups, nil + } + if errors.Is(err, cg.ErrNotV2) { + return _newCgroups() + } + return nil, err +} diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go new file mode 100644 index 00000000000..e74701508ed --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go @@ -0,0 +1,31 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build !linux +// +build !linux + +package runtime + +// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process +// to a valid GOMAXPROCS value. This is Linux-specific and not supported in the +// current OS. +func CPUQuotaToGOMAXPROCS(_ int, _ func(v float64) int) (int, CPUQuotaStatus, error) { + return -1, CPUQuotaUndefined, nil +} diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go b/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go new file mode 100644 index 00000000000..f8a2834ac00 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go @@ -0,0 +1,40 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package runtime + +import "math" + +// CPUQuotaStatus represents the status of how the CPU quota is used +type CPUQuotaStatus int + +const ( + // CPUQuotaUndefined is returned when CPU quota is undefined + CPUQuotaUndefined CPUQuotaStatus = iota + // CPUQuotaUsed is returned when a valid CPU quota can be used + CPUQuotaUsed + // CPUQuotaMinUsed is returned when CPU quota is smaller than the min value + CPUQuotaMinUsed +) + +// DefaultRoundFunc is the default function to convert CPU quota from float to int. It rounds the value down (floor). +func DefaultRoundFunc(v float64) int { + return int(math.Floor(v)) +} diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go new file mode 100644 index 00000000000..e561fe60b20 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go @@ -0,0 +1,139 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package maxprocs lets Go programs easily configure runtime.GOMAXPROCS to +// match the configured Linux CPU quota. Unlike the top-level automaxprocs +// package, it lets the caller configure logging and handle errors.
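The top-level automaxprocs package invokes Set from an init function on blank import; callers who want logging or error handling use this package directly. A sketch of the documented usage:

package main

import (
	"log"

	"go.uber.org/automaxprocs/maxprocs"
)

func main() {
	// Set matches GOMAXPROCS to the container's CPU quota and returns an
	// undo function that restores the previous value.
	undo, err := maxprocs.Set(maxprocs.Logger(log.Printf))
	defer undo()
	if err != nil {
		log.Printf("failed to set GOMAXPROCS: %v", err)
	}
}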
+package maxprocs // import "go.uber.org/automaxprocs/maxprocs" + +import ( + "os" + "runtime" + + iruntime "go.uber.org/automaxprocs/internal/runtime" +) + +const _maxProcsKey = "GOMAXPROCS" + +func currentMaxProcs() int { + return runtime.GOMAXPROCS(0) +} + +type config struct { + printf func(string, ...interface{}) + procs func(int, func(v float64) int) (int, iruntime.CPUQuotaStatus, error) + minGOMAXPROCS int + roundQuotaFunc func(v float64) int +} + +func (c *config) log(fmt string, args ...interface{}) { + if c.printf != nil { + c.printf(fmt, args...) + } +} + +// An Option alters the behavior of Set. +type Option interface { + apply(*config) +} + +// Logger uses the supplied printf implementation for log output. By default, +// Set doesn't log anything. +func Logger(printf func(string, ...interface{})) Option { + return optionFunc(func(cfg *config) { + cfg.printf = printf + }) +} + +// Min sets the minimum GOMAXPROCS value that will be used. +// Any value below 1 is ignored. +func Min(n int) Option { + return optionFunc(func(cfg *config) { + if n >= 1 { + cfg.minGOMAXPROCS = n + } + }) +} + +// RoundQuotaFunc sets the function that will be used to convert the CPU quota from float to int. +func RoundQuotaFunc(rf func(v float64) int) Option { + return optionFunc(func(cfg *config) { + cfg.roundQuotaFunc = rf + }) +} + +type optionFunc func(*config) + +func (of optionFunc) apply(cfg *config) { of(cfg) } + +// Set GOMAXPROCS to match the Linux container CPU quota (if any), returning +// any error encountered and an undo function. +// +// Set is a no-op on non-Linux systems and in Linux environments without a +// configured CPU quota. +func Set(opts ...Option) (func(), error) { + cfg := &config{ + procs: iruntime.CPUQuotaToGOMAXPROCS, + roundQuotaFunc: iruntime.DefaultRoundFunc, + minGOMAXPROCS: 1, + } + for _, o := range opts { + o.apply(cfg) + } + + undoNoop := func() { + cfg.log("maxprocs: No GOMAXPROCS change to reset") + } + + // Honor the GOMAXPROCS environment variable if present. Otherwise, amend + // `runtime.GOMAXPROCS()` with the current process' CPU quota if the OS is + // Linux, and guarantee a minimum value of 1. The minimum guaranteed value + // can be overridden using `maxprocs.Min()`. + if max, exists := os.LookupEnv(_maxProcsKey); exists { + cfg.log("maxprocs: Honoring GOMAXPROCS=%q as set in environment", max) + return undoNoop, nil + } + + maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS, cfg.roundQuotaFunc) + if err != nil { + return undoNoop, err + } + + if status == iruntime.CPUQuotaUndefined { + cfg.log("maxprocs: Leaving GOMAXPROCS=%v: CPU quota undefined", currentMaxProcs()) + return undoNoop, nil + } + + prev := currentMaxProcs() + undo := func() { + cfg.log("maxprocs: Resetting GOMAXPROCS to %v", prev) + runtime.GOMAXPROCS(prev) + } + + switch status { + case iruntime.CPUQuotaMinUsed: + cfg.log("maxprocs: Updating GOMAXPROCS=%v: using minimum allowed GOMAXPROCS", maxProcs) + case iruntime.CPUQuotaUsed: + cfg.log("maxprocs: Updating GOMAXPROCS=%v: determined from CPU quota", maxProcs) + } + + runtime.GOMAXPROCS(maxProcs) + return undo, nil +} diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/version.go b/vendor/go.uber.org/automaxprocs/maxprocs/version.go new file mode 100644 index 00000000000..cc7fc5aee12 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/maxprocs/version.go @@ -0,0 +1,24 @@ +// Copyright (c) 2017 Uber Technologies, Inc.
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package maxprocs + +// Version is the current package version. +const Version = "1.6.0" diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 81faec7e75d..97bd8b06f7a 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -225,6 +225,11 @@ var fhBytes = sync.Pool{ }, } +func invalidHTTP1LookingFrameHeader() FrameHeader { + fh, _ := readFrameHeader(make([]byte, frameHeaderLen), strings.NewReader("HTTP/1.1 ")) + return fh +} + // ReadFrameHeader reads 9 bytes from r and returns a FrameHeader. // Most users should use Framer.ReadFrame instead. func ReadFrameHeader(r io.Reader) (FrameHeader, error) { @@ -503,10 +508,16 @@ func (fr *Framer) ReadFrame() (Frame, error) { return nil, err } if fh.Length > fr.maxReadSize { + if fh == invalidHTTP1LookingFrameHeader() { + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge) + } return nil, ErrFrameTooLarge } payload := fr.getReadBuf(fh.Length) if _, err := io.ReadFull(fr.r, payload); err != nil { + if fh == invalidHTTP1LookingFrameHeader() { + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + } return nil, err } f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload) diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index b640deb0e0a..51fca38f61d 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -1068,7 +1068,10 @@ func (sc *serverConn) serve(conf http2Config) { func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { if sc.pingSent { - sc.vlogf("timeout waiting for PING response") + sc.logf("timeout waiting for PING response") + if f := sc.countErrorFunc; f != nil { + f("conn_close_lost_ping") + } sc.conn.Close() return } diff --git a/vendor/golang.org/x/net/publicsuffix/data/children b/vendor/golang.org/x/net/publicsuffix/data/children index 08261bffd19..986a246a6c0 100644 Binary files a/vendor/golang.org/x/net/publicsuffix/data/children and b/vendor/golang.org/x/net/publicsuffix/data/children differ diff --git a/vendor/golang.org/x/net/publicsuffix/data/nodes b/vendor/golang.org/x/net/publicsuffix/data/nodes index 1dae6ede8f2..38b8999600c 100644 Binary files
a/vendor/golang.org/x/net/publicsuffix/data/nodes and b/vendor/golang.org/x/net/publicsuffix/data/nodes differ diff --git a/vendor/golang.org/x/net/publicsuffix/data/text b/vendor/golang.org/x/net/publicsuffix/data/text index 7e516413f6c..b151d97de27 100644 --- a/vendor/golang.org/x/net/publicsuffix/data/text +++ b/vendor/golang.org/x/net/publicsuffix/data/text @@ -1 +1 @@ [old and new file contents elided: data/text stores the public suffix list as a single machine-generated line of compressed text, regenerated together with the binary children and nodes tables above; the blobs are not human-readable and are omitted here]
rmodalenferraraferraris-a-celticsfanferreroticallynxn--2scrj9cargoboavistanbulsan-sudtiroluhanskarmoyfetsundyndns-remotewdhlx3fgroundhandlingroznyfhvalerfilegear-sg-1filminamiminowafinalfinancefinnoyfirebaseapphilipscrappingrphonefosscryptedyndns-serverdalfirenetgamerscrysecuritytacticscwestus2firenzeaburfirestonefirmdaleilaocairportranslatedyndns-webhareidsbergroks-thisayamanobearalvahkikonaikawachinaganoharamcoachampionshiphoplixn--1qqw23afishingokasellfyresdalfitjarfitnessettsurugashimamurogawafjalerfkasumigaurayasudaflesbergrueflickragerotikagoshimandalflierneflirflogintohmangoldpoint2thisamitsukefloppymntransportefloraclegovcloudappservehttpbincheonflorencefloripadualstackasuyakumoduminamioguni5floristanohatakaharunservehumourfloromskoguidefinimalopolskanittedalfltransurlflutterflowhitesnowflakeflyfncarrdiyfndyndns-wikinkobayashimofusadojin-the-bandairlinemurorangecloudplatformshakotanpachihayaakasakawaharacingrondarfoolfor-ourfor-somedusajserveircasacampinagrandebulsan-suedtirolukowesleyfor-theaterfordebianforexrotheworkpccwhminamisanrikubetsupersaleksvikaszubytemarketingvollforgotdnserveminecraftrapanikkoelnforli-cesena-forlicesenaforlikescandypopensocialforsalesforceforsandasuoloisirservemp3fortalfosneservep2photographysiofotravelersinsurancefoxn--30rr7yfozfr-1fr-par-1fr-par-2franalytics-gatewayfredrikstadyndns-worksauheradyndns-mailfreedesktopazimuthaibinhphuocprapidyndns1freemyiphostyhostinguitarservepicservequakefreesitefreetlservesarcasmilefreightravinhlonganfrenchkisshikirovogradoyfreseniuservicebuskerudynnsaveincloudyndns-office-on-the-webflowtest-iservebloginlinefriuli-v-giuliarafriuli-ve-giuliafriuli-vegiuliafriuli-venezia-giuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giuliafriulive-giuliafriulivegiuliafriulivenezia-giuliafriuliveneziagiuliafriulivgiuliafrlfrogansevastopolitiendafrognfrolandynservebbsaves-the-whalessandria-trani-barletta-andriatranibarlettaandriafrom-akamaiorigin-stagingujaratmetacentruminamitanefrom-alfrom-arfrom-azureedgecompute-1from-caltanissettainaircraftraeumtgeradealstahaugesunderfrom-cockpitrdynuniversitysvardofrom-ctrentin-sudtirolfrom-dcasertaipeigersundnparsaltdaluroyfrom-decafjsevenassieradzfrom-flatangerfrom-gap-southeast-3from-higashiagatsumagoianiafrom-iafrom-idynv6from-ilfrom-in-vpncashorokanaiefrom-ksewhoswholidayfrom-kyfrom-langsonyatomigrationfrom-mangyshlakamaized-stagingujohanamakinoharafrom-mdynvpnplusavonarviikamisatokonamerikawauefrom-meetrentin-sued-tirolfrom-mihamadanangoguchilloutsystemscloudscalebookinghosteurodirfrom-mnfrom-modellingulenfrom-msexyfrom-mtnfrom-ncasinordeste-idclkarpaczest-a-la-maisondre-landray-dnsaludrayddns-ipartintuitjxn--1ck2e1barclaycards3-globalatinabelementorayomitanobservableusercontentateyamauth-fipstmninomiyakonojosoyrovnoticeableitungsenirasakibxos3-ca-central-180reggio-emilia-romagnaroyolasitebinordlandeus-canvasitebizenakanojogaszkolamericanfamilyds3-ap-south-12hparallelimodxboxeroxjavald-aostaticsxmitakeharaugustow-corp-staticblitzgorzeleccocotteatonamifunebetsuikirkenes3-ap-northeast-2ixn--0trq7p7nninjambylive-oninohekinanporovigonnakasatsunaibigawaukraanghkembuchikumagayagawakkanaibetsubame-central-123websitebuildersvp4from-ndyroyrvikingrongrossetouchijiwadedyn-berlincolnfrom-nefrom-nhlfanfrom-njsheezyfrom-nminamiuonumatsunofrom-nvalled-aostargithubusercontentrentin-suedtirolfrom-nysagamiharafrom-ohdancefrom-okegawafrom-orfrom-palmasfjordenfrom-pratohnoshookuwanakanotoddenfrom-ris-a-chefashionstorebaseljordyndns-picsbssaudafrom-schmidtre-gauldalfrom-sdfrom-tnfrom-txn--32vp30hachinoheavyfrom-utsir
acusagemakerfrom-val-daostavalleyfrom-vtrentino-a-adigefrom-wafrom-wiardwebspaceconfigunmarnardalfrom-wvalledaostarnobrzeguovdageaidnunjargausdalfrom-wyfrosinonefrostalowa-wolawafroyal-commissionfruskydivingushikamifuranorth-kazakhstanfujiiderafujikawaguchikonefujiminokamoenairtelebitbucketrzynh-servebeero-stageiseiroutingthecloudfujinomiyadappnodearthainguyenfujiokazakiryuohkurafujisatoshoeshellfujisawafujishiroishidakabiratoridediboxafujitsuruokakamigaharafujiyoshidatsunanjoetsumidaklakasamatsudogadobeioruntimedicinakaiwanairforcentralus-1fukayabeagleboardfukuchiyamadattorelayfukudomigawafukuis-a-conservativefsnoasakakinokiafukumitsubishigakisarazure-apigeefukuokakegawafukuroishikariwakunigamiharuovatlassian-dev-builderfukusakishiwadattoweberlevagangaviikanonjis-a-cpanelfukuyamagatakahashimamakisofukushimaniwamannordre-landfunabashiriuchinadavvenjargamvikatowicefunagatakahatakaishimokawafunahashikamiamakusatsumasendaisenergyeonggiizefundfunkfeuerfunnelshimonitayanagitapphutholdingsmall-websozais-a-cubicle-slaveroykenfuoiskujukuriyamaoris-a-democratrentino-aadigefuosskodjeezfurubirafurudonordreisa-hockeynutwentertainmentrentino-alto-adigefurukawaiishoppingxn--3bst00minamiyamashirokawanabeepsondriobranconagarahkkeravjunusualpersonfusoctrangyeongnamdinhs-heilbronnoysundfussaikisosakitahatakamatsukawafutabayamaguchinomihachimanagementrentino-altoadigefutboldlygoingnowhere-for-more-og-romsdalfuttsurutashinairtrafficmanagerfuturecmshimonosekikawafuturehosting-clusterfuturemailingzfvghakuis-a-doctoruncontainershimotsukehakusandnessjoenhaldenhalfmoonscaleforcehalsaitamatsukuris-a-financialadvisor-aurdalham-radio-ophuyenhamburghammarfeastasiahamurakamigoris-a-fullstackaufentigerhanamigawahanawahandahandcraftedugit-pages-researchedmarketplacehangglidinghangoutrentino-s-tirolhannannestadhannoshiroomghanoiphxn--3ds443ghanyuzenhappoumuginowaniihamatamakawajimap-southeast-4hasamazoncognitoigawahasaminami-alpshimotsumahashbanghasudahasura-appigboatshinichinanhasvikautokeinotionhatenablogspotrentino-stirolhatenadiaryhatinhachiojiyachiyodazaifudaigojomedio-campidano-mediocampidanomediohatogayachtshinjournalistorfjordhatoyamazakitakatakanezawahatsukaichikawamisatohokkaidontexistmein-iservschulegalleryhattfjelldalhayashimamotobusells-for-lesshinjukuleuvenicehazuminobushibuyahabacninhbinhdinhktrentino-sud-tirolhelpgfoggiahelsinkitakyushunantankazohemneshinkamigotoyokawahemsedalhepforgeblockshinshinotsupplyhetemlbfanheyflowienhigashichichibuzzhigashihiroshimanehigashiizumozakitamihokksundhigashikagawahigashikagurasoedahigashikawakitaaikitamotosumy-routerhigashikurumegurownproviderhigashimatsushimarriottrentino-sudtirolhigashimatsuyamakitaakitadaitomanaustdalhigashimurayamamotorcycleshinshirohigashinarusells-for-uzhhorodhigashinehigashiomitamamurausukitanakagusukumodshintokushimahigashiosakasayamanakakogawahigashishirakawamatakaokalmykiahigashisumiyoshikawaminamiaikitashiobarahigashitsunospamproxyhigashiurawa-mazowszexposeducatorprojectrentino-sued-tirolhigashiyamatokoriyamanashijonawatehigashiyodogawahigashiyoshinogaris-a-geekazunotogawahippythonanywherealminanohiraizumisatokaizukaluganskddiamondshintomikasaharahirakatashinagawahiranais-a-goodyearhirarahiratsukagawahirayahikobeatshinyoshitomiokamisunagawahitachiomiyakehitachiotaketakarazukamaishimodatehitradinghjartdalhjelmelandholyhomegoodshiojirishirifujiedahomeipikehomelinuxn--3e0b707ehomesecuritymacaparecidahomesecuritypcateringebungotakadaptableclerc66116-balsfjordeltaiwanumatajimidsundeportebinatsukigatakahamalvik8s3-ap-northeast-3utilities-12charstadao
kagakirunocelotenkawadlugolekadena4ufcfanimsiteasypanelblagrigentobishimafeloansncf-ipfstdlibestadultatarantoyonakagyokutoyonezawapartments3-ap-northeast-123webseiteckidsmynascloudfrontierimo-siemenscaledekaascolipicenoboribetsubsc-paywhirlimitedds3-accesspoint-fips3-ap-east-123miwebaccelastx4432-b-datacenterprisesakihokuizumoarekepnord-aurdalipaynow-dns-dynamic-dnsabruzzombieidskogasawarackmazerbaijan-mayenbaidarmeniajureggio-calabriaknoluoktagajoboji234lima-citychyattorneyagawafflecellclstagehirnayorobninsk123kotisivultrobjectselinogradimo-i-ranamizuhobby-siteaches-yogano-ip-ddnsgeekgalaxyzgierzgorakrehamnfshostrowwlkpnftstorage164-balsan-suedtirolillyokozeastus2000123paginawebadorsiteshikagamiishibechambagricoharugbydgoszczecin-addrammenuorogerscbgdyniaktyubinskaunicommuneustarostwodzislawdev-myqnapcloudflarecn-northwest-123sitewebcamauction-acornikonantotalimanowarudakunexus-2038homesenseeringhomeskleppilottottoris-a-greenhomeunixn--3hcrj9catfoodraydnsalvadorhondahonjyoitakasagonohejis-a-guruzshioyaltakkolobrzegersundongthapmircloudnshome-webservercelliguriahornindalhorsells-itrentino-suedtirolhorteneiheijis-a-hard-workershirahamatonbetsupportrentinoa-adigehospitalhotelwithflightshirakomaganehotmailhoyangerhoylandetakasakitaurahrsnillfjordhungyenhurdalhurumajis-a-hunterhyllestadhyogoris-a-knightpointtokashikitchenhypernodessaitokamachippubetsubetsugaruhyugawarahyundaiwafuneis-uberleetrentinoaltoadigeis-very-badis-very-evillasalleirvikharkovallee-d-aosteis-very-goodis-very-niceis-very-sweetpepperugiais-with-thebandoomdnsiskinkyowariasahikawaisk01isk02jellybeanjenv-arubahcavuotnagahamaroygardenflfanjeonnamsosnowiecaxiaskoyabenoopssejny-1jetztrentinos-tiroljevnakerjewelryjlljls-sto1jls-sto2jls-sto365jmpioneerjnjcloud-ver-jpcatholicurus-3joyentrentinostiroljoyokaichibahccavuotnagaivuotnagaokakyotambabybluebitemasekd1jozis-a-llamashikiwakuratejpmorgangwonjpnjprshoujis-a-musiciankoseis-a-painterhostsolutionshiraokamitsuekosheroykoshimizumakis-a-patsfankoshugheshwiiheyahoooshikamagayaitakashimarshallstatebankhplaystation-cloudsitekosugekotohiradomainsurealtypo3serverkotourakouhokumakogenkounosunnydaykouyamatlabcn-north-1kouzushimatrixn--41akozagawakozakis-a-personaltrainerkozowilliamhillkppspdnsigdalkrasnikahokutokyotangopocznore-og-uvdalkrasnodarkredumbrellapykrelliankristiansandcatsiiitesilklabudhabikinokawabajddarqhachirogatakanabeardubaioiraseekatsushikabedzin-brb-hostingkristiansundkrodsheradkrokstadelvaldaostavangerkropyvnytskyis-a-photographerokuappinkfh-muensterkrymisasaguris-a-playershiftrentinoaadigekumamotoyamatsumaebashimogosenkumanowtvalleedaostekumatorinokumejimatsumotofukekumenanyokkaichirurgiens-dentistes-en-francekundenkunisakis-a-republicanonoichinosekigaharakunitachiaraisaijorpelandkunitomigusukukis-a-rockstarachowicekunneppubtlsimple-urlkuokgroupiwatekurgankurobeebyteappleykurogiminamiawajikis-a-socialistockholmestrandkuroisodegaurakuromatsunais-a-soxfankuronkurotakikawasakis-a-studentalkushirogawakustanais-a-teacherkassyncloudkusupabaseminekutchanelkutnokuzumakis-a-techietis-a-liberalkvafjordkvalsundkvamfamplifyappchizip6kvanangenkvinesdalkvinnheradkviteseidatingkvitsoykwpspectrumisawamjondalenmonza-brianzapposirdalmonza-e-della-brianzaptonsbergmonzabrianzaramonzaebrianzamonzaedellabrianzamordoviamorenapolicemoriyamatsushigemoriyoshiminamibosoftwarendalenugmormonstermoroyamatsuuramortgagemoscowinbarrel-of-knowledgekey-stagingjerstadigickaracolognemrstudio-prodoyonagoyauthgearapps-1and1moseushimoichikuzenmosjoenmoskenesiskomakis-a-therapistoiamosslupskmpspbaremetalpha-myqna
pcloudaccess3-sa-east-1mosviknx-serversicherungmotegirlymoviemovimientoolslzmtrainingmuikamiokameokameyamatotakadamukodairamunakatanemuosattemupixolinodeusercontentrentinosud-tirolmurmanskomatsushimasudamurotorcraftrentinosudtirolmusashinodesakatakayamatsuzakis-an-accountantshiratakahagiangmuseumisconfusedmusicanthoboleslawiecommerce-shopitsitevaksdalmutsuzawamutualmy-vigormy-wanggoupilemyactivedirectorymyaddrangedalmyamazeplaymyasustor-elvdalmycloudnasushiobaramydattolocalcertrentinosued-tirolmydbservermyddnskingmydissentrentinosuedtirolmydnsmolaquilarvikomforbargainstitutemp-dnswatches3-us-east-2mydobissmarterthanyoumydrobofageorgeorgiamydsmushcdn77-securecipescaracalculatorskenmyeffectrentinsud-tirolmyfastly-edgemyfirewalledreplittlestargardmyforumishimatsusakahoginozawaonsennanmokurennebuyshousesimplesitemyfritzmyftpaccessojampanasonichernovtsydneymyhome-servermyjinomykolaivencloud66mymailermymediapchiryukyuragifuchungbukharanzanishinoomotegoismailillehammerfeste-ipartsamegawamynetnamegawamyokohamamatsudamypepizzamypetsokananiimilanoticiassurfastly-terrariuminamiizukaminoyamaxunison-servicesaxomyphotoshibalena-devicesokndalmypiemontemypsxn--42c2d9amyrdbxn--45br5cylmysecuritycamerakermyshopblocksolardalmyshopifymyspreadshopselectrentinsudtirolmytabitordermythic-beastsolundbeckommunalforbundmytis-a-bloggermytuleap-partnersomamyvnchitachinakagawassamukawatarittogitsuldalutskartuzymywirebungoonoplurinacionalpmnpodhalepodlasiellakdnepropetrovskanlandpodzonepohlpoivronpokerpokrovskomonotteroypolkowicepoltavalle-aostavernpolyspacepomorzeszowindowsserveftplatter-appkommuneponpesaro-urbino-pesarourbinopesaromasvuotnaritakurashikis-an-actresshishikuis-a-libertarianpordenonepornporsangerporsangugeporsgrunnanpoznanpraxihuanprdprereleaseoullensakerprgmrprimetelprincipenzaprivatelinkyard-cloudletsomnarvikomorotsukaminokawanishiaizubangeprivatizehealthinsuranceprogressivegarsheiyufueliv-dnsoowinepromoliserniapropertysnesopotrentinsued-tirolprotectionprotonetrentinsuedtirolprudentialpruszkowinnersor-odalprvcyprzeworskogpunyukis-an-anarchistoloseyouripinokofuefukihabororoshisogndalpupulawypussycatanzarowiosor-varangerpvhackerpvtrentoyosatoyookaneyamazoepwchitosetogliattipsamnangerpzqotoyohashimotoyakokamimineqponiatowadaqslgbtrevisognequalifioapplatterpl-wawsappspacehostedpicardquangngais-an-artistordalquangninhthuanquangtritonoshonais-an-engineeringquickconnectroandindependent-inquest-a-la-masionquicksytesorfoldquipelementsorocabalestrandabergamochizukijobservablehqldquizzesorreisahayakawakamiichinomiyagithubpreviewskrakowitdkontoguraswinoujscienceswissphinxn--45brj9chonanbunkyonanaoshimaringatlanbibaiduckdnsamparachutinglugsjcbnpparibashkiriasyno-dspjelkavikongsbergsynology-diskstationsynology-dspockongsvingertushungrytuvalle-daostaobaolbia-tempio-olbiatempioolbialowiezaganquangnamasteigenoamishirasatochigiftsrhtrogstadtuxfamilytuyenquangbinhthuantwmailvegasrlvelvetromsohuissier-justiceventurestaurantrustkanieruchomoscientistoripresspydebergvestfoldvestnesrvaomoriguchiharaffleentrycloudflare-ipfsortlandvestre-slidrecreationvestre-totennishiawakuravestvagoyvevelstadvfstreakusercontentroitskoninfernovecorealtorvibo-valentiavibovalentiavideovinhphuchoshichikashukudoyamakeupartysfjordrivelandrobakamaihd-stagingmbhartinnishinoshimattelemarkhangelskaruizawavinnicapitalonevinnytsiavipsinaapplockervirginankokubunjis-byklecznagatorokunohealth-carereformincommbankhakassiavirtual-uservecounterstrikevirtualservervirtualuserveexchangevisakuholeckobierzyceviterboliviajessheimperiavivianvivoryvixn--45q11chowdervlaan
derennesoyvladikavkazimierz-dolnyvladimirvlogisticstreamlitapplcube-serversusakis-an-actorvmitourismartlabelingvolvologdanskontumintshowavolyngdalvoorlopervossevangenvotevotingvotoyotap-southeast-5vps-hostreaklinkstrippervusercontentrvaporcloudwiwatsukiyonotairesindevicenzaokinawashirosatochiokinoshimagazinewixsitewixstudio-fipstrynwjgorawkzwloclawekonyvelolipopmcdirwmcloudwmelhustudynamisches-dnsorumisugitomobegetmyipifony-2wmflabstuff-4-salewoodsidell-ogliastrapiapplinzis-certifiedworldworse-thanhphohochiminhadanorthflankatsuyamassa-carrara-massacarraramassabunzenwowithgoogleapiszwpdevcloudwpenginepoweredwphostedmailwpmucdn77-sslingwpmudevelopmentrysiljanewaywpsquaredwritesthisblogoiplumbingotpantheonsitewroclawsglobalacceleratorahimeshimakanegasakievennodebalancernwtcp4wtfastlylbarefootballooningjerdrumemergencyonabarumemorialivornobservereitatsunofficialolitapunkapsienamsskoganeindependent-panelombardiademfakefurniturealestatefarmerseinemrnotebooks-prodeomniwebthings3-object-lambdauthgear-stagingivestbyglandroverhallair-traffic-controllagdenesnaaseinet-freaks3-deprecatedgcagliarissadistgstagempresashibetsukuiitatebayashikaoirmembers3-eu-central-1kapp-ionosegawafaicloudineat-urlive-websitehimejibmdevinapps3-ap-southeast-1337wuozuerichardlillesandefjordwwwithyoutuberspacewzmiuwajimaxn--4it797koobindalxn--4pvxs4allxn--54b7fta0cchromediatechnologyeongbukarumaifmemsetkmaxxn--1ctwolominamatarpitksatmalluxenishiokoppegardrrxn--55qw42gxn--55qx5dxn--5dbhl8dxn--5js045dxn--5rtp49chungnamdalseidfjordtvsangotsukitahiroshimarcherkasykkylvenneslaskerrypropertiesanjotelulublindesnesannanishitosashimizunaminamidaitolgaularavellinodeobjectsannoheliohostrodawaraxn--5rtq34kooris-a-nascarfanxn--5su34j936bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a264churchaselfipirangallupsunappgafanishiwakinuyamashinazawaxn--80aaa0cvacationstufftoread-booksnesoundcastreak-linkomvuxn--3pxu8khmelnitskiyamassivegridxn--80adxhksurnadalxn--80ao21axn--80aqecdr1axn--80asehdbarrell-of-knowledgesuite-stagingjesdalombardyn-vpndns3-us-gov-east-1xn--80aswgxn--80audnedalnxn--8dbq2axn--8ltr62kopervikhmelnytskyivalleeaostexn--8pvr4uxn--8y0a063axn--90a1affinitylotterybnikeisencoreapiacenzachpomorskiengiangxn--90a3academiamibubbleappspotagerxn--90aeroportsinfolkebibleasingrok-freeddnsfreebox-osascoli-picenogatachikawakayamadridvagsoyerxn--90aishobaraoxn--90amckinseyxn--90azhytomyradweblikes-piedmontuckerxn--9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aroport-byameloyxn--asky-iraxn--aurskog-hland-jnbarsycenterprisecloudbeesusercontentattoolforgerockyonagunicloudiscordsays3-us-gov-west-1xn--avery-yuasakuragawaxn--b-5gaxn--b4w605ferdxn--balsan-sdtirol-nsbarsyonlinequipmentaveusercontentawktoyonomurauthordalandroidienbienishiazaiiyamanouchikujolsterehabmereisenishigotembaixadavvesiidaknongivingjemnes3-eu-north-1xn--bck1b9a5dre4ciprianiigatairaumalatvuopmicrosoftbankasaokamikoaniikappudopaaskvollocaltonetlifyinvestmentsanokashibatakatsukiyosembokutamakiyosunndaluxuryxn--bdddj-mrabdxn--bearalvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7axn--bidr-5nachikatsuuraxn--bievt-0qa2hosted-by-previderxn--bjarky-fyanagawaxn--bjddar-ptarumizusawaxn--blt-elabkhaziamallamaceiobbcircleaningmodelscapetownnews-stagingmxn--1lqs03nissandoyxn--bmlo-grafana-developerauniterois-coolblogdnshisuifuettertdasnetzxn--bod-2naturalxn--bozen-sdtirol-2obihirosakikamijimayfirstorjdevcloudjiffyxn--brnny-wuacademy-firewall-gatewayxn--brnnysund-m8accident-investigation-aptibleadpagespeedmobilizeropslattumbriaxn--brum-voagatulaspeziaxn--btsfjord
-9zaxn--bulsan-sdtirol-nsbasicserver-on-webpaaskimitsubatamicrolightingjovikaragandautoscanaryggeemrappui-productions3-eu-west-1xn--c1avgxn--c2br7gxn--c3s14mitoyoakexn--cck2b3basilicataniavocats3-eu-west-2xn--cckwcxetdxn--cesena-forl-mcbremangerxn--cesenaforl-i8axn--cg4bkis-foundationxn--ciqpnxn--clchc0ea0b2g2a9gcdn77-storagencymrulezajskiptveterinaireadthedocs-hostedogawarabikomaezakishimabarakawagoexn--czr694basketballfinanzlgkpmglassessments3-us-west-1xn--czrs0t0xn--czru2dxn--d1acj3batsfjordiscordsezpisdnipropetrovskygearapparasiteu-2xn--d1alfastvps-serverisignxn--d1atunesquaresinstagingxn--d5qv7z876ciscofreakadns-cloudflareglobalashovhachijoinvilleirfjorduponthewifidelitypeformesswithdnsantamariakexn--davvenjrga-y4axn--djrs72d6uyxn--djty4koryokamikawanehonbetsuwanouchikuhokuryugasakis-a-nursellsyourhomeftpinbrowsersafetymarketshiraois-a-landscaperspectakasugais-a-lawyerxn--dnna-graingerxn--drbak-wuaxn--dyry-iraxn--e1a4cistrondheimeteorappassenger-associationissayokoshibahikariyalibabacloudcsantoandrecifedexperts-comptablesanukinzais-a-bruinsfanissedalvivanovoldaxn--eckvdtc9dxn--efvn9surveysowaxn--efvy88hadselbuzentsujiiexn--ehqz56nxn--elqq16haebaruericssongdalenviknakatombetsumitakagildeskaliszxn--eveni-0qa01gaxn--f6qx53axn--fct429kosaigawaxn--fhbeiarnxn--finny-yuaxn--fiq228c5hsbcitadelhichisochimkentmpatriaxn--fiq64bauhauspostman-echofunatoriginstances3-us-west-2xn--fiqs8susonoxn--fiqz9suzakarpattiaaxn--fjord-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--forl-cesena-fcbentleyoriikarasjohkamikitayamatsurindependent-review-credentialless-staticblitzw-staticblitzxn--forlcesena-c8axn--fpcrj9c3dxn--frde-grajewolterskluwerxn--frna-woaxn--frya-hraxn--fzc2c9e2citicaravanylvenetogakushimotoganexn--fzys8d69uvgmailxn--g2xx48civilaviationionjukujitawaravennaharimalborkdalxn--gckr3f0fauskedsmokorsetagayaseralingenovaraxn--gecrj9clancasterxn--ggaviika-8ya47hagakhanhhoabinhduongxn--gildeskl-g0axn--givuotna-8yanaizuxn--gjvik-wuaxn--gk3at1exn--gls-elacaixaxn--gmq050is-gonexn--gmqw5axn--gnstigbestellen-zvbentrendhostingleezeu-3xn--gnstigliefern-wobiraxn--h-2failxn--h1ahnxn--h1alizxn--h2breg3evenesuzukanazawaxn--h2brj9c8cldmail-boxfuseljeducationporterxn--h3cuzk1dielddanuorris-into-animein-vigorlicexn--hbmer-xqaxn--hcesuolo-7ya35beppublic-inquiryoshiokanumazuryurihonjouwwebhoptokigawavoues3-eu-west-3xn--hebda8beskidyn-ip24xn--hery-iraxn--hgebostad-g3axn--hkkinen-5waxn--hmmrfeasta-s4accident-prevention-fleeklogesquare7xn--hnefoss-q1axn--hobl-iraxn--holtlen-hxaxn--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b6b1a6a2exn--imr513nxn--indery-fyandexcloudxn--io0a7is-into-carshitaramaxn--j1adpdnsupdaterxn--j1aefbsbxn--2m4a15exn--j1ael8bestbuyshoparenagareyamagentositenrikuzentakataharaholtalengerdalwaysdatabaseballangenkainanaejrietiengiangheannakadomarineen-rootaribeiraogakicks-assnasaarlandiscountry-snowplowiczeladzxn--j1amhagebostadxn--j6w193gxn--jlq480n2rgxn--jlster-byaotsurgeryxn--jrpeland-54axn--jvr189mittwaldserverxn--k7yn95exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kfjord-iuaxn--klbu-woaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--4dbgdty6choyodobashichinohealthcareersamsclubartowest1-usamsungminakamichikaiseiyoichipsandvikcoromantovalle-d-aostakinouexn--koluokta-7ya57haibarakitakamiizumisanofidonnakaniikawatanaguraxn--kprw13dxn--kpry57dxn--kput3is-into-cartoonshizukuishimojis-a-linux-useranishiaritabashikshacknetlibp2pimientaketomisatourshiranukamitondabayashiogamagoriziaxn--krager-gyasakaiminatoyotomiyazakis-into-gamessinaklodzkochikushinonsenasakuchinotsuchiurakawaxn--kranghke-b0axn--
krdsherad-m8axn--krehamn-dxaxn--krjohka-hwab49jdfirmalselveruminisitexn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyasugitlabbvieeexn--kvnangen-k0axn--l-1fairwindsuzukis-an-entertainerxn--l1accentureklamborghinikolaeventsvalbardunloppadoval-d-aosta-valleyxn--laheadju-7yasuokannamimatakatoris-leetrentinoalto-adigexn--langevg-jxaxn--lcvr32dxn--ldingen-q1axn--leagaviika-52bhzc01xn--lesund-huaxn--lgbbat1ad8jejuxn--lgrd-poacctfcloudflareanycastcgroupowiat-band-campaignoredstonedre-eikerxn--lhppi-xqaxn--linds-pramericanexpresservegame-serverxn--loabt-0qaxn--lrdal-sraxn--lrenskog-54axn--lt-liaclerkstagentsaobernardovre-eikerxn--lten-granexn--lury-iraxn--m3ch0j3axn--mely-iraxn--merker-kuaxn--mgb2ddesvchoseikarugalsacexn--mgb9awbfbx-oschokokekscholarshipschoolbusinessebytomaridagawarmiastapleschoolsztynsetranoyxn--mgba3a3ejtunkonsulatinowruzhgorodxn--mgba3a4f16axn--mgba3a4fra1-dellogliastraderxn--mgba7c0bbn0axn--mgbaam7a8haiduongxn--mgbab2bdxn--mgbah1a3hjkrdxn--mgbai9a5eva00bialystokkeymachineu-4xn--mgbai9azgqp6jelasticbeanstalkhersonlanxesshizuokamogawaxn--mgbayh7gparaglidingxn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn--mgbcpq6gpa1axn--mgberp4a5d4a87gxn--mgberp4a5d4arxn--mgbgu82axn--mgbi4ecexperimentsveioxn--mgbpl2fhskypecoris-localhostcertificationxn--mgbqly7c0a67fbclever-clouderavpagexn--mgbqly7cvafricapooguyxn--mgbt3dhdxn--mgbtf8fldrvareservdxn--mgbtx2bielawalbrzycharternopilawalesundiscourses3-website-ap-northeast-1xn--mgbx4cd0abogadobeaemcloud-ip-dynamica-west-1xn--mix082fbxoschulplattforminamimakis-a-catererxn--mix891fedjeepharmacienschulserverxn--mjndalen-64axn--mk0axindependent-inquiryxn--mk1bu44cleverappsaogoncanva-appsaotomelbournexn--mkru45is-lostrolekamakurazakiwielunnerxn--mlatvuopmi-s4axn--mli-tlavagiskexn--mlselv-iuaxn--moreke-juaxn--mori-qsakurais-not-axn--mosjen-eyatsukanoyaizuwakamatsubushikusakadogawaxn--mot-tlavangenxn--mre-og-romsdal-qqbuservebolturindalxn--msy-ula0haiphongolffanshimosuwalkis-a-designerxn--mtta-vrjjat-k7aflakstadotsurugimbiella-speziaxarnetbankanzakiyosatokorozawaustevollpagest-mon-blogueurovision-ranchernigovernmentdllivingitpagemprendeatnuh-ohtawaramotoineppueblockbusterniizaustrheimdbambinagisobetsucks3-ap-southeast-2xn--muost-0qaxn--mxtq1miuraxn--ngbc5azdxn--ngbe9e0axn--ngbrxn--4dbrk0cexn--nit225kosakaerodromegalloabatobamaceratabusebastopoleangaviikafjordxn--nmesjevuemie-tcbalsan-sudtirolkuszczytnord-fron-riopretodayxn--nnx388axn--nodeloittexn--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery-byaeservehalflifeinsurancexn--nvuotna-hwaxn--nyqy26axn--o1achernivtsicilyxn--o3cw4hair-surveillancexn--o3cyx2axn--od0algardxn--od0aq3bielskoczoweddinglitcheap-south-2xn--ogbpf8flekkefjordxn--oppegrd-ixaxn--ostery-fyatsushiroxn--osyro-wuaxn--otu796dxn--p1acfolksvelvikonskowolayangroupippugliaxn--p1ais-not-certifiedxn--pgbs0dhakatanortonkotsumomodenakatsugawaxn--porsgu-sta26fedorainfracloudfunctionschwarzgwesteuropencraftransfer-webappharmacyou2-localplayerxn--pssu33lxn--pssy2uxn--q7ce6axn--q9jyb4clickrisinglesjaguarvodkagaminombrendlyngenebakkeshibukawakeliwebhostingouv0xn--qcka1pmcprequalifymeinforumzxn--qqqt11miyazure-mobilevangerxn--qxa6axn--qxamiyotamanoxn--rady-iraxn--rdal-poaxn--rde-ulazioxn--rdy-0nabaris-savedxn--rennesy-v1axn--rhkkervju-01afedorapeopleikangerxn--rholt-mragowoltlab-democraciaxn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn--risa-5naturbruksgymnxn--risr-iraxn--rland-uuaxn--rlingen-mxaxn--rmskog-byawaraxn--rny31hakodatexn--rovu88bieszczadygeyachimataijinderoyusuharazurefdietateshinanomachintaifun-dnsaliases121xn--rros-granvindafjordxn--rskog-uuaxn
--rst-0navigationxn--rsta-framercanvasvn-repospeedpartnerxn--rvc1e0am3exn--ryken-vuaxn--ryrvik-byawatahamaxn--s-1faitheshopwarezzoxn--s9brj9clientoyotsukaidownloadurbanamexnetfylkesbiblackbaudcdn-edgestackhero-networkinggroupperxn--sandnessjen-ogbizxn--sandy-yuaxn--sdtirol-n2axn--seral-lraxn--ses554gxn--sgne-graphicswidnicaobangxn--skierv-utazurecontainerimamateramombetsupplieswidnikitagatamayukuhashimokitayamaxn--skjervy-v1axn--skjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5navoizumizakis-slickharkivallee-aosteroyxn--slt-elabievathletajimabaria-vungtaudiopsys3-website-ap-southeast-1xn--smla-hraxn--smna-gratangenxn--snase-nraxn--sndre-land-0cbifukagawalmartaxiijimarugame-hostrowieconomiasagaeroclubmedecin-berlindasdaeguambulancechireadmyblogsytecnologiazurestaticappspaceusercontentproxy9guacuiababia-goraclecloudappschaefflereggiocalabriaurland-4-salernooreggioemiliaromagnarusawaurskog-holandinggff5xn--snes-poaxn--snsa-roaxn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbigv-infolldalomoldegreeu-central-2xn--srfold-byaxn--srreisa-q1axn--srum-gratis-a-bookkeepermashikexn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalsen-sqbiharvanedgeappengineu-south-1xn--stre-toten-zcbihoronobeokayamagasakikuchikuseihicampinashikiminohostfoldiscoverbaniazurewebsitests3-external-1xn--t60b56axn--tckwebview-assetswiebodzindependent-commissionxn--tiq49xqyjelenia-goraxn--tjme-hraxn--tn0agrocerydxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trentin-sd-tirol-rzbikedaejeonbuk0emmafann-arborlandd-dnsfor-better-thanhhoarairkitapps-audiblebesbyencowayokosukanraetnaamesjevuemielnogiehtavuoatnabudejjuniper2-ddnss3-123minsidaarborteamsterdamnserverseating-organicbcg123homepagexl-o-g-i-navyokote123hjemmesidealerdalaheadjuegoshikibichuo0o0g0xn--trentin-sdtirol-7vbiomutazas3-website-ap-southeast-2xn--trentino-sd-tirol-c3birkenesoddtangentapps3-website-eu-west-1xn--trentino-sdtirol-szbittermezproxyusuitatamotors3-website-sa-east-1xn--trentinosd-tirol-rzbjarkoyuullensvanguardisharparisor-fronishiharaxn--trentinosdtirol-7vbjerkreimmobilieniwaizumiotsukumiyamazonaws-cloud9xn--trentinsd-tirol-6vbjugnieznorddalomzaporizhzhiaxn--trentinsdtirol-nsblackfridaynightayninhaccalvinklein-butterepairbusanagochigasakindigenakayamarumorimachidaxn--trgstad-r1axn--trna-woaxn--troms-zuaxn--tysvr-vraxn--uc0atvarggatromsakegawaxn--uc0ay4axn--uist22hakonexn--uisz3gxn--unjrga-rtashkenturystykanmakiyokawaraxn--unup4yxn--uuwu58axn--vads-jraxn--valle-aoste-ebbtuscanyxn--valle-d-aoste-ehboehringerikerxn--valleaoste-e7axn--valledaoste-ebbvaapstempurlxn--vard-jraxn--vegrshei-c0axn--vermgensberater-ctb-hostingxn--vermgensberatung-pwbloombergentingliwiceu-south-2xn--vestvgy-ixa6oxn--vg-yiablushangrilaakesvuemieleccevervaultgoryuzawaxn--vgan-qoaxn--vgsy-qoa0j0xn--vgu402clinicarbonia-iglesias-carboniaiglesiascarboniaxn--vhquvaroyxn--vler-qoaxn--vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq861bmoattachments3-website-us-east-1xn--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgbh1cliniquenoharaxn--wgbl6axn--xhq521bms3-website-us-gov-west-1xn--xkc2al3hye2axn--xkc2dl3a5ee0hakubaclieu-1xn--y9a3aquarelleborkangerxn--yer-znavuotnarashinoharaxn--yfro4i67oxn--ygarden-p1axn--ygbi2ammxn--4gbriminiserverxn--ystre-slidre-ujbmwcloudnonproddaemongolianishiizunazukindustriaxn--zbx025dxn--zf0avxn--4it168dxn--zfr164bnrweatherchannelsdvrdns3-website-us-west-1xnbayernxz \ No newline at end of file diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go index 56069d04296..047cb30eb15 100644 --- 
+++ b/vendor/golang.org/x/net/publicsuffix/list.go
@@ -77,7 +77,7 @@ func (list) String() string {
 // privately managed domain (and in practice, not a top level domain) or an
 // unmanaged top level domain (and not explicitly mentioned in the
 // publicsuffix.org list). For example, "foo.org" and "foo.co.uk" are ICANN
-// domains, "foo.dyndns.org" and "foo.blogspot.co.uk" are private domains and
+// domains, "foo.dyndns.org" is a private domain and
 // "cromulent" is an unmanaged top level domain.
 //
 // Use cases for distinguishing ICANN domains like "foo.com" from private
diff --git a/vendor/golang.org/x/net/publicsuffix/table.go b/vendor/golang.org/x/net/publicsuffix/table.go
index 78d400fa653..0fadf9527f7 100644
--- a/vendor/golang.org/x/net/publicsuffix/table.go
+++ b/vendor/golang.org/x/net/publicsuffix/table.go
@@ -4,7 +4,7 @@ package publicsuffix
 
 import _ "embed"
 
-const version = "publicsuffix.org's public_suffix_list.dat, git revision 63cbc63d470d7b52c35266aa96c4c98c96ec499c (2023-08-03T10:01:25Z)"
+const version = "publicsuffix.org's public_suffix_list.dat, git revision 2c960dac3d39ba521eb5db9da192968f5be0aded (2025-03-18T07:22:13Z)"
 
 const (
 	nodesBits = 40
@@ -26,7 +26,7 @@ const (
 )
 
 // numTLD is the number of top level domains.
-const numTLD = 1474
+const numTLD = 1454
 
 // text is the combined text of all labels.
 //
@@ -63,8 +63,8 @@ var nodes uint40String
 //go:embed data/children
 var children uint32String
 
-// max children 743 (capacity 1023)
-// max text offset 30876 (capacity 65535)
+// max children 870 (capacity 1023)
+// max text offset 31785 (capacity 65535)
 // max text length 31 (capacity 63)
-// max hi 9322 (capacity 16383)
-// max lo 9317 (capacity 16383)
+// max hi 10100 (capacity 16383)
+// max lo 10095 (capacity 16383)
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 8facc3b99f5..66b77f923fa 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -461,7 +461,7 @@ github.com/gogo/protobuf/types
 # github.com/gogo/status v1.1.1
 ## explicit; go 1.12
 github.com/gogo/status
-# github.com/golang-jwt/jwt/v5 v5.2.1
+# github.com/golang-jwt/jwt/v5 v5.2.2
 ## explicit; go 1.18
 github.com/golang-jwt/jwt/v5
 # github.com/golang-migrate/migrate/v4 v4.18.1
@@ -1327,6 +1327,12 @@ go.opentelemetry.io/proto/otlp/trace/v1
 # go.uber.org/atomic v1.11.0
 ## explicit; go 1.18
 go.uber.org/atomic
+# go.uber.org/automaxprocs v1.6.0
+## explicit; go 1.20
+go.uber.org/automaxprocs
+go.uber.org/automaxprocs/internal/cgroups
+go.uber.org/automaxprocs/internal/runtime
+go.uber.org/automaxprocs/maxprocs
 # go.uber.org/goleak v1.3.0
 ## explicit; go 1.20
 go.uber.org/goleak
@@ -1374,7 +1380,7 @@ golang.org/x/exp/slices
 # golang.org/x/mod v0.24.0
 ## explicit; go 1.23.0
 golang.org/x/mod/semver
-# golang.org/x/net v0.37.0
+# golang.org/x/net v0.38.0
 ## explicit; go 1.23.0
 golang.org/x/net/bpf
 golang.org/x/net/context
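
The updated list.go doc comment drops "foo.blogspot.co.uk" as an example of a private domain, leaving "foo.dyndns.org". The ICANN/private/unmanaged distinction the comment describes is observable through the package's public API; a minimal sketch using the same example domains from the comment (illustrative only, not part of this diff):

// Prints each domain's public suffix and whether it is ICANN-managed.
package main

import (
	"fmt"

	"golang.org/x/net/publicsuffix"
)

func main() {
	for _, domain := range []string{"foo.org", "foo.dyndns.org", "foo.cromulent"} {
		// PublicSuffix never fails; icann reports whether the matched
		// suffix comes from the ICANN section of the public suffix list.
		suffix, icann := publicsuffix.PublicSuffix(domain)
		fmt.Printf("%s: public suffix %q, ICANN-managed: %v\n", domain, suffix, icann)
	}
}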
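
The "capacity" figures in the regenerated table.go comments are the maxima of fixed-width bit fields, i.e. 2^n - 1 for field widths of 10, 16, 6, and 14 bits respectively; a quick arithmetic check (illustrative only, not vendored code):

package main

import "fmt"

func main() {
	// 1023 65535 63 16383 — matching the capacities quoted in table.go.
	fmt.Println(1<<10-1, 1<<16-1, 1<<6-1, 1<<14-1)
}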
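
The newly vendored go.uber.org/automaxprocs module sizes GOMAXPROCS to the container's cgroup CPU quota at startup, avoiding CPU throttling when the Go runtime otherwise defaults to the host's core count. A minimal, self-contained sketch of explicit initialization; the Logger wiring and log output here are assumptions for illustration, not how this repository configures it:

package main

import (
	"log"
	"runtime"

	"go.uber.org/automaxprocs/maxprocs"
)

func main() {
	// maxprocs.Set caps GOMAXPROCS at the cgroup CPU quota and returns an
	// undo function that restores the previous value.
	undo, err := maxprocs.Set(maxprocs.Logger(log.Printf))
	if err != nil {
		log.Printf("failed to set GOMAXPROCS: %v", err)
	}
	defer undo()

	log.Printf("GOMAXPROCS=%d", runtime.GOMAXPROCS(0))
}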