@@ -2,7 +2,7 @@
 . /etc/sysconfig/heat-params
 set -ex
 
-CHART_NAME="prometheus-operator"
+CHART_NAME="kube-prometheus-stack"
 
 if [ "$(echo ${MONITORING_ENABLED} | tr '[:upper:]' '[:lower:]')" = "true" ]; then
     echo "Writing ${CHART_NAME} config"
@@ -80,22 +80,18 @@
         PROTOCOL="http"
         INSECURE_SKIP_VERIFY="True"
     fi
-    # FIXME: Force protocol to http as we don't want to use the cluster certs
-    USE_HTTPS="False"
 
     if [ "$(echo ${VERIFY_CA} | tr '[:upper:]' '[:lower:]')" == "false" ]; then
         INSECURE_SKIP_VERIFY="True"
     fi
 
     cat << EOF >> ${HELM_CHART_DIR}/values.yaml
-prometheus-operator:
-
-  defaultRules:
-    rules:
-      #TODO: To enable this we need firstly take care of exposing certs
-      etcd: false
+kube-prometheus-stack:
 
   alertmanager:
+    podDisruptionBudget:
+      enabled: true
+    #config:
     ingress:
       enabled: ${MONITORING_INGRESS_ENABLED}
       annotations:
@@ -108,6 +104,7 @@ ${APP_INGRESS_BASIC_AUTH_ANNOTATIONS}
       - ${CLUSTER_ROOT_DOMAIN_NAME}
       paths:
       - /alertmanager${APP_INGRESS_PATH_APPEND}
+      pathType: ImplementationSpecific
       ## TLS configuration for Alertmanager Ingress
       ## Secret must be manually created in the namespace
       tls: []
@@ -118,24 +115,16 @@ ${APP_INGRESS_BASIC_AUTH_ANNOTATIONS}
       image:
         repository: ${CONTAINER_INFRA_PREFIX:-quay.io/prometheus/}alertmanager
       logFormat: json
+      routePrefix: /alertmanager
       externalUrl: https://${CLUSTER_ROOT_DOMAIN_NAME}/alertmanager
-      # routePrefix: /alertmanager
       # resources:
       #   requests:
       #     cpu: 100m
       #     memory: 256Mi
       priorityClassName: "system-cluster-critical"
 
   grafana:
-    image:
-      repository: ${CONTAINER_INFRA_PREFIX:-grafana/}grafana
     #enabled: ${ENABLE_GRAFANA}
-    sidecar:
-      image: ${CONTAINER_INFRA_PREFIX:-kiwigrid/}k8s-sidecar:0.1.99
-    resources:
-      requests:
-        cpu: 100m
-        memory: 128Mi
     adminPassword: ${GRAFANA_ADMIN_PASSWD}
     ingress:
       enabled: ${MONITORING_INGRESS_ENABLED}
@@ -146,13 +135,24 @@ ${APP_INGRESS_ANNOTATIONS}
       ## Must be provided if Ingress is enable.
       hosts:
       - ${CLUSTER_ROOT_DOMAIN_NAME}
-      path: /grafana${APP_INGRESS_PATH_APPEND}
+      paths:
+      - /grafana${APP_INGRESS_PATH_APPEND}
+      pathType: ImplementationSpecific
       ## TLS configuration for grafana Ingress
       ## Secret must be manually created in the namespace
       tls: []
       # - secretName: grafana-general-tls
       #   hosts:
       #   - grafana.example.com
+    sidecar:
+      image:
+        repository: ${CONTAINER_INFRA_PREFIX:-quay.io/kiwigrid/}k8s-sidecar
+    image:
+      repository: ${CONTAINER_INFRA_PREFIX:-grafana/}grafana
+    resources:
+      requests:
+        cpu: 100m
+        memory: 128Mi
     persistence:
       enabled: ${APP_GRAFANA_PERSISTENT_STORAGE}
       storageClassName: ${MONITORING_STORAGE_CLASS_NAME}
@@ -162,21 +162,10 @@ ${APP_INGRESS_ANNOTATIONS}
         domain: ${CLUSTER_ROOT_DOMAIN_NAME}
         root_url: https://${CLUSTER_ROOT_DOMAIN_NAME}/grafana
         serve_from_sub_path: true
-      paths:
-        data: /var/lib/grafana/data
-        logs: /var/log/grafana
-        plugins: /var/lib/grafana/plugins
-        provisioning: /etc/grafana/provisioning
-      analytics:
-        check_for_updates: true
       log:
         mode: console
       log.console:
         format: json
-      grafana_net:
-        url: https://grafana.net
-    plugins:
-    - grafana-piechart-panel
 
   kubeApiServer:
     tlsConfig:
@@ -198,9 +187,9 @@ ${APP_INGRESS_ANNOTATIONS}
     serviceMonitor:
       ## Enable scraping kube-controller-manager over https.
       ## Requires proper certs (not self-signed) and delegated authentication/authorization checks
-      https: ${USE_HTTPS}
+      https: "True"
       # Skip TLS certificate validation when scraping
-      insecureSkipVerify: null
+      insecureSkipVerify: "True"
       # Name of the server to use when validating TLS certificate
       serverName: null
 
@@ -242,19 +231,21 @@ ${APP_INGRESS_ANNOTATIONS}
     serviceMonitor:
       ## Enable scraping kube-scheduler over https.
       ## Requires proper certs (not self-signed) and delegated authentication/authorization checks
-      https: ${USE_HTTPS}
+      https: "True"
       ## Skip TLS certificate validation when scraping
-      insecureSkipVerify: null
+      insecureSkipVerify: "True"
       ## Name of the server to use when validating TLS certificate
       serverName: null
 
-  # kubeProxy:
-  #   ## If your kube proxy is not deployed as a pod, specify IPs it can be found on
-  #   endpoints: [] # masters + minions
-  #   serviceMonitor:
-  #     ## Enable scraping kube-proxy over https.
-  #     ## Requires proper certs (not self-signed) and delegated authentication/authorization checks
-  #     https: ${USE_HTTPS}
+  kubeProxy:
+    ## If your kube proxy is not deployed as a pod, specify IPs it can be found on
+    endpoints: ${KUBE_MASTERS_PRIVATE} # masters + minions
+    serviceMonitor:
+      ## Enable scraping kube-proxy over https.
+      ## Requires proper certs (not self-signed) and delegated authentication/authorization checks
+      https: "True"
+      ## Skip TLS certificate validation when scraping
+      insecureSkipVerify: "True"
 
   kube-state-metrics:
     priorityClassName: "system-cluster-critical"
@@ -271,37 +262,34 @@ ${APP_INGRESS_ANNOTATIONS}
       limits:
         cpu: 20m
         memory: 20M
-    extraArgs:
-    - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/)
-    - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$
-    sidecars: []
-    ## - name: nvidia-dcgm-exporter
-    ##   image: nvidia/dcgm-exporter:1.4.3
 
   prometheusOperator:
-    priorityClassName: "system-cluster-critical"
-    tlsProxy:
-      image:
-        repository: ${CONTAINER_INFRA_PREFIX:-squareup/}ghostunnel
     admissionWebhooks:
       patch:
         image:
           repository: ${CONTAINER_INFRA_PREFIX:-jettech/}kube-webhook-certgen
-        priorityClassName: "system-cluster-critical"
-
-    resources: {}
-    # requests:
-    #   cpu: 5m
-    #   memory: 10Mi
+        resources:
+          requests:
+            cpu: 2m
+          limits:
+            memory: 30M
+        # clusterDomain: ${CLUSTER_ROOT_DOMAIN_NAME}
+        priorityClassName: "system-cluster-critical"
     logFormat: json
+    logLevel: info
+    resources:
+      requests:
+        cpu: 2m
+      limits:
+        memory: 32M
     image:
-      repository: ${CONTAINER_INFRA_PREFIX:-quay.io/coreos/}prometheus-operator
-    configmapReloadImage:
-      repository: ${CONTAINER_INFRA_PREFIX:-quay.io/coreos/}configmap-reload
+      repository: ${CONTAINER_INFRA_PREFIX:-quay.io/prometheus-operator/}prometheus-operator
+    prometheusDefaultBaseImage: ${CONTAINER_INFRA_PREFIX:-quay.io/prometheus/}prometheus
+    alertmanagerDefaultBaseImage: ${CONTAINER_INFRA_PREFIX:-quay.io/prometheus/}alertmanager
     prometheusConfigReloaderImage:
-      repository: ${CONTAINER_INFRA_PREFIX:-quay.io/coreos/}prometheus-config-reloader
-    hyperkubeImage:
-      repository: ${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/}hyperkube
+      repository: ${CONTAINER_INFRA_PREFIX:-quay.io/prometheus-operator/}prometheus-config-reloader
+    thanosImage:
+      repository: ${CONTAINER_INFRA_PREFIX:-quay.io/thanos/}thanos
 
   prometheus:
     ingress:
@@ -317,6 +305,7 @@ ${APP_INGRESS_BASIC_AUTH_ANNOTATIONS}
       - ${CLUSTER_ROOT_DOMAIN_NAME}
       paths:
       - /prometheus${APP_INGRESS_PATH_APPEND}
+      pathType: ImplementationSpecific
       ## TLS configuration for Prometheus Ingress
       ## Secret must be manually created in the namespace
       tls: []
@@ -332,11 +321,13 @@ ${APP_INGRESS_BASIC_AUTH_ANNOTATIONS}
       bearerTokenFile:
     prometheusSpec:
       scrapeInterval: ${MONITORING_INTERVAL_SECONDS}s
-      scrapeInterval: 30s
       evaluationInterval: 30s
       image:
         repository: ${CONTAINER_INFRA_PREFIX:-quay.io/prometheus/}prometheus
-      retention: 14d
+      tolerations:
+      - key: "node-role.kubernetes.io/master"
+        operator: "Exists"
+        effect: "NoSchedule"
       externalLabels:
         cluster_uuid: ${CLUSTER_UUID}
       externalUrl: https://${CLUSTER_ROOT_DOMAIN_NAME}/prometheus
@@ -352,7 +343,16 @@ ${APP_INGRESS_BASIC_AUTH_ANNOTATIONS}
       retention: ${MONITORING_RETENTION_DAYS}d
       retentionSize: ${MONITORING_RETENTION_SIZE_GB}GB
       logFormat: json
-      #routePrefix: /prometheus
+      routePrefix: /prometheus
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: magnum.openstack.org/role
+                operator: In
+                values:
+                - master
       resources:
         requests:
           cpu: ${PROMETHEUS_SERVER_CPU}m
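Note on the image overrides above: every repository: value in the heredoc relies on bash's ${VAR:-default} expansion, so the quay.io defaults apply unless the operator sets CONTAINER_INFRA_PREFIX to point at a private registry. A minimal sketch of the mechanism (the registry.example.com prefix is a made-up example):

# Default applies when CONTAINER_INFRA_PREFIX is unset or empty:
unset CONTAINER_INFRA_PREFIX
echo "${CONTAINER_INFRA_PREFIX:-quay.io/prometheus/}alertmanager"
# prints: quay.io/prometheus/alertmanager

# With a private mirror configured (hypothetical value):
CONTAINER_INFRA_PREFIX="registry.example.com/mirror/"
echo "${CONTAINER_INFRA_PREFIX:-quay.io/prometheus/}alertmanager"
# prints: registry.example.com/mirror/alertmanager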
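Because the heredoc delimiter is unquoted (cat << EOF), every ${...} above is substituted when the file is written, so a bad variable value can produce invalid YAML. A quick sanity check of the rendered file before Helm consumes it, assuming python3 with PyYAML is available on the node:

# Fails with a traceback if the rendered values.yaml is not valid YAML.
python3 -c 'import sys, yaml; yaml.safe_load(open(sys.argv[1]))' \
    "${HELM_CHART_DIR}/values.yaml" && echo "values.yaml parses cleanly"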