|
1 | 1 | {
|
2 | 2 | local container = $.core.v1.container,
|
3 | 3 |
|
4 |
| - query_frontend_args:: { |
5 |
| - target: 'query-frontend', |
6 |
| - |
7 |
| - // Need log.level=debug so all queries are logged, needed for analyse.py. |
8 |
| - 'log.level': 'debug', |
9 |
| - |
10 |
| - // Increase HTTP server response write timeout, as we were seeing some |
11 |
| - // queries that return a lot of data timeing out. |
12 |
| - 'server.http-write-timeout': '1m', |
13 |
| - |
14 |
| - // Split long queries up into multiple day-long queries. |
15 |
| - 'querier.split-queries-by-interval': '24h', |
16 |
| - |
17 |
| - // Cache query results. |
18 |
| - 'querier.align-querier-with-step': true, |
19 |
| - 'querier.cache-results': true, |
20 |
| - 'frontend.memcached.hostname': 'memcached-frontend.%s.svc.cluster.local' % $._config.namespace, |
21 |
| - 'frontend.memcached.service': 'memcached-client', |
22 |
| - 'frontend.memcached.timeout': '500ms', |
23 |
| - |
24 |
| - // So that exporters like cloudwatch can still send in data and be un-cached. |
25 |
| - 'frontend.max-cache-freshness': '10m', |
26 |
| - |
27 |
| - // Compress HTTP responses; improves latency for very big results and slow |
28 |
| - // connections. |
29 |
| - 'querier.compress-http-responses': true, |
30 |
| - |
31 |
| - // So it can recieve big responses from the querier. |
32 |
| - 'server.grpc-max-recv-msg-size-bytes': 100 << 20, |
33 |
| - |
34 |
| - // Limit queries to 500 days, allow this to be override per-user. |
35 |
| - 'store.max-query-length': '12000h', // 500 Days |
36 |
| - 'limits.per-user-override-config': '/etc/cortex/overrides.yaml', |
37 |
| - } + if $._config.queryFrontend.sharded_queries_enabled then { |
38 |
| - 'querier.parallelise-shardable-queries': 'true', |
39 |
| - |
40 |
| - // in process tenant queues on frontends. We divide by the number of frontends; 2 in this case in order to apply the global limit in aggregate. |
41 |
| - // basically base * shard_factor * query_split_factor / num_frontends where |
42 |
| - 'querier.max-outstanding-requests-per-tenant': std.floor(200 * $._config.queryFrontend.shard_factor * $._config.queryFrontend.query_split_factor / $._config.queryFrontend.replicas), |
43 |
| - |
44 |
| - 'querier.query-ingesters-within': $._config.queryConfig['querier.query-ingesters-within'], |
45 |
| - } + $._config.storageConfig |
46 |
| - else {}, |
| 4 | + query_frontend_args:: |
| 5 | + $._config.ringConfig + |
| 6 | + { |
| 7 | + target: 'query-frontend', |
| 8 | + |
| 9 | + // Need log.level=debug so all queries are logged, needed for analyse.py. |
| 10 | + 'log.level': 'debug', |
| 11 | + |
| 12 | + // Increase HTTP server response write timeout, as we were seeing some |
| 13 | + // queries that return a lot of data timing out. |
| 14 | + 'server.http-write-timeout': '1m', |
| 15 | + |
| 16 | + // Split long queries up into multiple day-long queries. |
| 17 | + 'querier.split-queries-by-interval': '24h', |
| 18 | + |
| 19 | + // Cache query results. |
| 20 | + 'querier.align-querier-with-step': true, |
| 21 | + 'querier.cache-results': true, |
| 22 | + 'frontend.memcached.hostname': 'memcached-frontend.%s.svc.cluster.local' % $._config.namespace, |
| 23 | + 'frontend.memcached.service': 'memcached-client', |
| 24 | + 'frontend.memcached.timeout': '500ms', |
| 25 | + |
| 26 | + // So that exporters like cloudwatch can still send in data and be un-cached. |
| 27 | + 'frontend.max-cache-freshness': '10m', |
| 28 | + |
| 29 | + // Compress HTTP responses; improves latency for very big results and slow |
| 30 | + // connections. |
| 31 | + 'querier.compress-http-responses': true, |
| 32 | + |
| 33 | + // So it can receive big responses from the querier. |
| 34 | + 'server.grpc-max-recv-msg-size-bytes': 100 << 20, |
| 35 | + |
| 36 | + // Limit queries to 500 days, allow this to be overridden per-user. |
| 37 | + 'store.max-query-length': '12000h', // 500 Days |
| 38 | + 'limits.per-user-override-config': '/etc/cortex/overrides.yaml', |
| 39 | + } + ( |
| 40 | + if $._config.queryFrontend.sharded_queries_enabled then |
| 41 | + { |
| 42 | + 'querier.parallelise-shardable-queries': 'true', |
| 43 | + |
| 44 | + // in process tenant queues on frontends. We divide by the number of frontends; 2 in this case in order to apply the global limit in aggregate. |
| 45 | + // basically base * shard_factor * query_split_factor / num_frontends where |
| 46 | + 'querier.max-outstanding-requests-per-tenant': std.floor(200 * $._config.queryFrontend.shard_factor * $._config.queryFrontend.query_split_factor / $._config.queryFrontend.replicas), |
| 47 | + |
| 48 | + 'querier.query-ingesters-within': $._config.queryConfig['querier.query-ingesters-within'], |
| 49 | + } + $._config.storageConfig |
| 50 | + else {} |
| 51 | + ), |
47 | 52 |
|
48 | 53 | query_frontend_container::
|
49 | 54 | container.new('query-frontend', $._images.query_frontend) +
|
|
0 commit comments