@@ -8,7 +8,7 @@ source monitoring/bin/common.sh
 source bin/service-url-include.sh
 source bin/autogenerate-include.sh
 
-if [ "$OPENSHIFT_CLUSTER" = "true" ]; then
+if [ "$OPENSHIFT_CLUSTER" == "true" ]; then
   if [ "${CHECK_OPENSHIFT_CLUSTER:-true}" = "true" ]; then
     log_error "This script should not be run on OpenShift clusters"
     log_error "Run monitoring/bin/deploy_monitoring_openshift.sh instead"
@@ -35,7 +35,7 @@ autogeneratedYAMLFile="$TMP_DIR/autogenerate-prom-operator.yaml"
 
 AUTOGENERATE_STORAGECLASS="${AUTOGENERATE_STORAGECLASS:-false}"
 
-if [ "$AUTOGENERATE_STORAGECLASS" = "true" ]; then
+if [ "$AUTOGENERATE_STORAGECLASS" == "true" ]; then
 
   if [ ! -f "$autogeneratedYAMLFile" ]; then
     log_debug "Creating file [$autogeneratedYAMLFile]"
@@ -65,7 +65,7 @@ if [ ! -f "$PROM_OPER_USER_YAML" ]; then
   PROM_OPER_USER_YAML=$TMP_DIR/empty.yaml
 fi
 
-if [ "$HELM_DEBUG" = "true" ]; then
+if [ "$HELM_DEBUG" == "true" ]; then
   helmDebug="--debug"
 fi
 
@@ -95,7 +95,7 @@ helmRepoAdd prometheus-community https://prometheus-community.github.io/helm-cha
 
 istioValuesFile=$TMP_DIR/empty.yaml
 # Istio - Federate data from Istio's Prometheus instance
-if [ "$ISTIO_ENABLED" = "true" ]; then
+if [ "$ISTIO_ENABLED" == "true" ]; then
   log_verbose "Including Istio metric federation"
   istioValuesFile=$TMP_DIR/values-prom-operator-tmp.yaml
 else
@@ -105,13 +105,13 @@
 
 # Check if Prometheus Operator CRDs are already installed
 PROM_OPERATOR_CRD_UPDATE=${PROM_OPERATOR_CRD_UPDATE:-true}
-if [ "$PROM_OPERATOR_CRD_UPDATE" = "true" ]; then
+if [ "$PROM_OPERATOR_CRD_UPDATE" == "true" ]; then
   log_verbose "Updating Prometheus Operator custom resource definitions"
   crds=(alertmanagerconfigs alertmanagers prometheuses prometheusrules podmonitors servicemonitors thanosrulers probes)
   for crd in "${crds[@]}"; do
 
     ## Determine CRD URL - if in an airgap environment, look for them in USER_DIR.
-    if [ "$AIRGAP_DEPLOYMENT" = "true" ]; then
+    if [ "$AIRGAP_DEPLOYMENT" == "true" ]; then
       crdURL=$USER_DIR/monitoring/prometheus-operator-crd/$PROM_OPERATOR_CRD_VERSION/monitoring.coreos.com_$crd.yaml
 
     ## Fail if the CRDs could not be located.
@@ -140,7 +140,7 @@ kubectl delete daemonset -n "$MON_NS" -l app=prometheus-node-exporter --ignore-n
 
 # Optional workload node placement support
 MON_NODE_PLACEMENT_ENABLE=${MON_NODE_PLACEMENT_ENABLE:-${NODE_PLACEMENT_ENABLE:-false}}
-if [ "$MON_NODE_PLACEMENT_ENABLE" = "true" ]; then
+if [ "$MON_NODE_PLACEMENT_ENABLE" == "true" ]; then
   log_verbose "Enabling monitoring components for workload node placement"
   wnpValuesFile="monitoring/node-placement/values-prom-operator-wnp.yaml"
 else
@@ -151,7 +151,7 @@
 # Optional TLS Support
 tlsValuesFile=$TMP_DIR/empty.yaml
 tlsPromAlertingEndpointFile=$TMP_DIR/empty.yaml
-if [ "$TLS_ENABLE" = "true" ]; then
+if [ "$TLS_ENABLE" == "true" ]; then
   apps=(prometheus alertmanager grafana)
   create_tls_certs "$MON_NS" monitoring "${apps[@]}"
 
@@ -161,7 +161,7 @@ if [ "$TLS_ENABLE" = "true" ]; then
 
   log_verbose "Provisioning TLS-enabled Prometheus datasource for Grafana"
   grafanaDS=grafana-datasource-prom-https.yaml
-  if [ "$MON_TLS_PATH_INGRESS" = "true" ]; then
+  if [ "$MON_TLS_PATH_INGRESS" == "true" ]; then
     grafanaDS=grafana-datasource-prom-https-path.yaml
     tlsPromAlertingEndpointFile=monitoring/tls/prom-alertendpoint-path-https.yaml
   fi
@@ -179,7 +179,7 @@
 
 AUTOGENERATE_INGRESS="${AUTOGENERATE_INGRESS:-false}"
 
-if [ "$AUTOGENERATE_INGRESS" = "true" ]; then
+if [ "$AUTOGENERATE_INGRESS" == "true" ]; then
 
   if [ ! -f "$autogeneratedYAMLFile" ]; then
     log_debug "Creating file [$autogeneratedYAMLFile]"
@@ -188,7 +188,7 @@ if [ "$AUTOGENERATE_INGRESS" = "true" ]; then
     log_debug "File [$autogeneratedYAMLFile] already exists"
   fi
 
-  if [ "$MON_TLS_PATH_INGRESS" != "true" ] && [ "$ROUTING" = "path" ]; then
+  if [ "$MON_TLS_PATH_INGRESS" != "true" ] && [ "$ROUTING" == "path" ]; then
    log_error "Environment variable MON_TLS_PATH_INGRESS must be set to 'true' when ROUTING='path'."
    exit 1
   fi
@@ -209,7 +209,7 @@ if [ "$AUTOGENERATE_INGRESS" = "true" ]; then
   # Don't assign the variable to itself
   ALERTMANAGER_PATH="${ALERTMANAGER_PATH:-alertmanager}"
   if [ -z "$ALERTMANAGER_FQDN" ]; then
-    if [ "$ROUTING" = "host" ]; then
+    if [ "$ROUTING" == "host" ]; then
       ALERTMANAGER_FQDN="$ALERTMANAGER_PATH.$BASE_DOMAIN"
     else
       ALERTMANAGER_FQDN="$BASE_DOMAIN"
@@ -225,7 +225,7 @@ if [ "$AUTOGENERATE_INGRESS" = "true" ]; then
   # Don't assign the variable to itself
   GRAFANA_PATH="${GRAFANA_PATH:-grafana}"
   if [ -z "$GRAFANA_FQDN" ]; then
-    if [ "$ROUTING" = "host" ]; then
+    if [ "$ROUTING" == "host" ]; then
      GRAFANA_FQDN="$GRAFANA_PATH.$BASE_DOMAIN"
     else
      GRAFANA_FQDN="$BASE_DOMAIN"
@@ -240,7 +240,7 @@ if [ "$AUTOGENERATE_INGRESS" = "true" ]; then
   # Don't assign the variable to itself
   PROMETHEUS_PATH="${PROMETHEUS_PATH:-prometheus}"
   if [ -z "$PROMETHEUS_FQDN" ]; then
-    if [ "$ROUTING" = "host" ]; then
+    if [ "$ROUTING" == "host" ]; then
      PROMETHEUS_FQDN="$PROMETHEUS_PATH.$BASE_DOMAIN"
     else
      PROMETHEUS_FQDN="$BASE_DOMAIN"
@@ -261,7 +261,7 @@ if [ "$AUTOGENERATE_INGRESS" = "true" ]; then
   yq -i '.prometheus.ingress.enabled=env(PROMETHEUS_INGRESS_ENABLE)' "$autogeneratedYAMLFile"
 
   ###hosts, paths and fqdn
-  if [ "$ROUTING" = "host" ]; then
+  if [ "$ROUTING" == "host" ]; then
     yq -i '.alertmanager.ingress.hosts.[0]=env(ALERTMANAGER_FQDN)' "$autogeneratedYAMLFile"
     yq -i '.alertmanager.ingress.tls.[0].hosts.[0]=env(ALERTMANAGER_FQDN)' "$autogeneratedYAMLFile"
     exturl="https://$ALERTMANAGER_FQDN" yq -i '.alertmanager.alertmanagerSpec.externalUrl=env(exturl)' "$autogeneratedYAMLFile"
@@ -315,7 +315,7 @@
 
 nodePortValuesFile=$TMP_DIR/empty.yaml
 PROM_NODEPORT_ENABLE=${PROM_NODEPORT_ENABLE:-false}
-if [ "$PROM_NODEPORT_ENABLE" = "true" ]; then
+if [ "$PROM_NODEPORT_ENABLE" == "true" ]; then
   log_debug "Enabling NodePort access for Prometheus and Alertmanager"
   nodePortValuesFile=monitoring/values-prom-nodeport.yaml
 fi
@@ -333,7 +333,7 @@ if helm3ReleaseExists "$promRelease" "$MON_NS"; then
   log_verbose "Upgrading via Helm ($(date) - timeout 20m)"
 else
   grafanaPwd="$GRAFANA_ADMIN_PASSWORD"
-  if [ "$grafanaPwd" = "" ]; then
+  if [ "$grafanaPwd" == "" ]; then
     log_debug "Generating random Grafana admin password"
     showPass="true"
     grafanaPwd="$(randomPassword)"
@@ -342,14 +342,14 @@ else
   fi
 
   # See https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#from-21x-to-22x
-  if [ "$V4M_CURRENT_VERSION_MAJOR" = "1" ] && [[ $V4M_CURRENT_VERSION_MINOR =~ [0-5] ]]; then
+  if [ "$V4M_CURRENT_VERSION_MAJOR" == "1" ] && [[ $V4M_CURRENT_VERSION_MINOR =~ [0-5] ]]; then
     kubectl delete -n "$MON_NS" --ignore-not-found \
       deployments.apps \
       -l app.kubernetes.io/instance=v4m-prometheus-operator,app.kubernetes.io/name=kube-state-metrics
   fi
 
   TRACING_ENABLE="${TRACING_ENABLE:-false}"
-  if [ "$TRACING_ENABLE" = "false" ]; then
+  if [ "$TRACING_ENABLE" == "false" ]; then
     tempoDSFile=$TMP_DIR/empty.yaml
   else
     TEMPO_USER_YAML="${TEMPO_USER_YAML:-$USER_DIR/monitoring/user-values-tempo.yaml}"
@@ -415,7 +415,7 @@ enable_pod_token_automount "$MON_NS" deployment v4m-operator
 log_info "Deploying ServiceMonitors and Prometheus rules"
 log_verbose "Deploying cluster ServiceMonitors"
 
-if [ "$TRACING_ENABLE" = "true" ]; then
+if [ "$TRACING_ENABLE" == "true" ]; then
   log_info "Tracing enabled..."
 
   # Generate yaml file with all container-related keys
@@ -448,7 +448,7 @@ if kubectl get ns "$NGINX_NS" 2>/dev/null; then
   nginxFound=true
 fi
 
-if [ "$nginxFound" = "true" ]; then
+if [ "$nginxFound" == "true" ]; then
   log_verbose "NGINX found. Deploying podMonitor to [$NGINX_NS] namespace"
   kubectl apply -n "$NGINX_NS" -f monitoring/monitors/kube/podMonitor-nginx.yaml 2>/dev/null
 fi
@@ -473,7 +473,7 @@
 
 # Elasticsearch Datasource for Grafana
 LOGGING_DATASOURCE="${LOGGING_DATASOURCE:-false}"
-if [ "$LOGGING_DATASOURCE" = "true" ]; then
+if [ "$LOGGING_DATASOURCE" == "true" ]; then
   set +e
   log_debug "Creating the logging data source using the create_logging_datasource script"
   monitoring/bin/create_logging_datasource.sh
@@ -544,7 +544,7 @@ log_notice ""
 # log_notice "================================================================================"
 # log_notice ""
 
-if [ "$showPass" = "true" ]; then
+if [ "$showPass" == "true" ]; then
   # Find the grafana pod
 
   log_notice "Generated Grafana admin password is: $grafanaPwd"