Skip to content

Commit c45bdb4

Browse files
committed
update comment and change back to double equal
1 parent 88a8790 commit c45bdb4

10 files changed

+66
-66
lines changed

.shellcheckrc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,5 +2,5 @@
22
disable=SC1091
33
# Disables the check for expanding an array, as it predictably gives first element
44
disable=SC2128
5-
# Disbale check for var not set in same file
5+
# Disables check for var not set in same file
66
disable=SC2154

monitoring/bin/common.sh

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,10 +21,10 @@ if [ "$SAS_MONITORING_COMMON_SOURCED" = "" ]; then
2121
export MON_NS="${MON_NS:-monitoring}"
2222
export TLS_ENABLE="${MON_TLS_ENABLE:-${TLS_ENABLE:-true}}"
2323

24-
if [ "$AIRGAP_DEPLOYMENT" = "true" ]; then
24+
if [ "$AIRGAP_DEPLOYMENT" == "true" ]; then
2525

2626
#Special processing to handle Viya-level deployment script
27-
if [ "$(basename "$0")" = "deploy_monitoring_viya.sh" ]; then
27+
if [ "$(basename "$0")" == "deploy_monitoring_viya.sh" ]; then
2828
V4M_NS="$VIYA_NS"
2929
else
3030
export V4M_NS=$MON_NS

monitoring/bin/create_logging_datasource.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -79,7 +79,7 @@ log_debug "Grafana Pod [$grafanaPod]"
7979
pluginInstalled=$(kubectl exec -n "$MON_NS" "$grafanaPod" -- bash -c "grafana cli plugins ls |grep -c opensearch-datasource|| true")
8080
log_debug "Grafana OpenSearch Datasource Plugin installed? [$pluginInstalled]"
8181

82-
if [ "$pluginInstalled" = "0" ]; then
82+
if [ "$pluginInstalled" == "0" ]; then
8383

8484
log_info "Installing OpenSearch Datasource plugin"
8585
pluginVersion="${GRAFANA_DATASOURCE_PLUGIN_VERSION:-2.17.4}"

monitoring/bin/deploy_dashboards.sh

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -76,52 +76,52 @@ if [ "$1" != "" ]; then
7676
fi
7777

7878
log_info "Deploying dashboards to the [$DASH_NS] namespace ..."
79-
if [ "$WELCOME_DASH" = "true" ]; then
79+
if [ "$WELCOME_DASH" == "true" ]; then
8080
log_verbose "Deploying welcome dashboards"
8181
deploy_dashboards "welcome"
8282
fi
8383

84-
if [ "$KUBE_DASH" = "true" ]; then
84+
if [ "$KUBE_DASH" == "true" ]; then
8585
log_verbose "Deploying Kubernetes cluster dashboards"
8686
deploy_dashboards "kube"
8787
fi
8888

89-
if [ "$ISTIO_DASH" = "true" ]; then
89+
if [ "$ISTIO_DASH" == "true" ]; then
9090
log_verbose "Deploying Istio dashboards"
9191
deploy_dashboards "istio"
9292
fi
9393

94-
if [ "$LOGGING_DASH" = "true" ]; then
94+
if [ "$LOGGING_DASH" == "true" ]; then
9595
log_verbose "Deploying Logging dashboards"
9696
deploy_dashboards "logging"
9797
fi
9898

99-
if [ "$VIYA_DASH" = "true" ]; then
99+
if [ "$VIYA_DASH" == "true" ]; then
100100
log_verbose "Deploying SAS Viya dashboards"
101101
deploy_dashboards "viya"
102102
fi
103103

104-
if [ "$VIYA_LOGS_DASH" = "true" ]; then
104+
if [ "$VIYA_LOGS_DASH" == "true" ]; then
105105
log_verbose "Deploying SAS Viya dashboards with log support"
106106
deploy_dashboards "viya-logs"
107107
fi
108108

109-
if [ "$PGMONITOR_DASH" = "true" ]; then
109+
if [ "$PGMONITOR_DASH" == "true" ]; then
110110
log_verbose "Deploying Postgres dashboards"
111111
deploy_dashboards "pgmonitor"
112112
fi
113113

114-
if [ "$RABBITMQ_DASH" = "true" ]; then
114+
if [ "$RABBITMQ_DASH" == "true" ]; then
115115
log_verbose "Deploying RabbitMQ dashboards"
116116
deploy_dashboards "rabbitmq"
117117
fi
118118

119-
if [ "$NGINX_DASH" = "true" ]; then
119+
if [ "$NGINX_DASH" == "true" ]; then
120120
log_verbose "Deploying NGINX dashboards"
121121
deploy_dashboards "nginx"
122122
fi
123123

124-
if [ "$USER_DASH" = "true" ]; then
124+
if [ "$USER_DASH" == "true" ]; then
125125
userDashDir="$USER_DIR/monitoring/dashboards"
126126
if [ -d "$userDashDir" ]; then
127127
log_verbose "Deploying user dashboards from [$userDashDir]"

monitoring/bin/deploy_monitoring_cluster.sh

Lines changed: 23 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ source monitoring/bin/common.sh
88
source bin/service-url-include.sh
99
source bin/autogenerate-include.sh
1010

11-
if [ "$OPENSHIFT_CLUSTER" = "true" ]; then
11+
if [ "$OPENSHIFT_CLUSTER" == "true" ]; then
1212
if [ "${CHECK_OPENSHIFT_CLUSTER:-true}" = "true" ]; then
1313
log_error "This script should not be run on OpenShift clusters"
1414
log_error "Run monitoring/bin/deploy_monitoring_openshift.sh instead"
@@ -35,7 +35,7 @@ autogeneratedYAMLFile="$TMP_DIR/autogenerate-prom-operator.yaml"
3535

3636
AUTOGENERATE_STORAGECLASS="${AUTOGENERATE_STORAGECLASS:-false}"
3737

38-
if [ "$AUTOGENERATE_STORAGECLASS" = "true" ]; then
38+
if [ "$AUTOGENERATE_STORAGECLASS" == "true" ]; then
3939

4040
if [ ! -f "$autogeneratedYAMLFile" ]; then
4141
log_debug "Creating file [$autogeneratedYAMLFile]"
@@ -65,7 +65,7 @@ if [ ! -f "$PROM_OPER_USER_YAML" ]; then
6565
PROM_OPER_USER_YAML=$TMP_DIR/empty.yaml
6666
fi
6767

68-
if [ "$HELM_DEBUG" = "true" ]; then
68+
if [ "$HELM_DEBUG" == "true" ]; then
6969
helmDebug="--debug"
7070
fi
7171

@@ -95,7 +95,7 @@ helmRepoAdd prometheus-community https://prometheus-community.github.io/helm-cha
9595

9696
istioValuesFile=$TMP_DIR/empty.yaml
9797
# Istio - Federate data from Istio's Prometheus instance
98-
if [ "$ISTIO_ENABLED" = "true" ]; then
98+
if [ "$ISTIO_ENABLED" == "true" ]; then
9999
log_verbose "Including Istio metric federation"
100100
istioValuesFile=$TMP_DIR/values-prom-operator-tmp.yaml
101101
else
@@ -105,13 +105,13 @@ fi
105105

106106
# Check if Prometheus Operator CRDs are already installed
107107
PROM_OPERATOR_CRD_UPDATE=${PROM_OPERATOR_CRD_UPDATE:-true}
108-
if [ "$PROM_OPERATOR_CRD_UPDATE" = "true" ]; then
108+
if [ "$PROM_OPERATOR_CRD_UPDATE" == "true" ]; then
109109
log_verbose "Updating Prometheus Operator custom resource definitions"
110110
crds=(alertmanagerconfigs alertmanagers prometheuses prometheusrules podmonitors servicemonitors thanosrulers probes)
111111
for crd in "${crds[@]}"; do
112112

113113
## Determine CRD URL - if in an airgap environment, look for them in USER_DIR.
114-
if [ "$AIRGAP_DEPLOYMENT" = "true" ]; then
114+
if [ "$AIRGAP_DEPLOYMENT" == "true" ]; then
115115
crdURL=$USER_DIR/monitoring/prometheus-operator-crd/$PROM_OPERATOR_CRD_VERSION/monitoring.coreos.com_$crd.yaml
116116

117117
## Fail if the CRDs could not be located.
@@ -140,7 +140,7 @@ kubectl delete daemonset -n "$MON_NS" -l app=prometheus-node-exporter --ignore-n
140140

141141
# Optional workload node placement support
142142
MON_NODE_PLACEMENT_ENABLE=${MON_NODE_PLACEMENT_ENABLE:-${NODE_PLACEMENT_ENABLE:-false}}
143-
if [ "$MON_NODE_PLACEMENT_ENABLE" = "true" ]; then
143+
if [ "$MON_NODE_PLACEMENT_ENABLE" == "true" ]; then
144144
log_verbose "Enabling monitoring components for workload node placement"
145145
wnpValuesFile="monitoring/node-placement/values-prom-operator-wnp.yaml"
146146
else
@@ -151,7 +151,7 @@ fi
151151
# Optional TLS Support
152152
tlsValuesFile=$TMP_DIR/empty.yaml
153153
tlsPromAlertingEndpointFile=$TMP_DIR/empty.yaml
154-
if [ "$TLS_ENABLE" = "true" ]; then
154+
if [ "$TLS_ENABLE" == "true" ]; then
155155
apps=(prometheus alertmanager grafana)
156156
create_tls_certs "$MON_NS" monitoring "${apps[@]}"
157157

@@ -161,7 +161,7 @@ if [ "$TLS_ENABLE" = "true" ]; then
161161

162162
log_verbose "Provisioning TLS-enabled Prometheus datasource for Grafana"
163163
grafanaDS=grafana-datasource-prom-https.yaml
164-
if [ "$MON_TLS_PATH_INGRESS" = "true" ]; then
164+
if [ "$MON_TLS_PATH_INGRESS" == "true" ]; then
165165
grafanaDS=grafana-datasource-prom-https-path.yaml
166166
tlsPromAlertingEndpointFile=monitoring/tls/prom-alertendpoint-path-https.yaml
167167
fi
@@ -179,7 +179,7 @@ fi
179179

180180
AUTOGENERATE_INGRESS="${AUTOGENERATE_INGRESS:-false}"
181181

182-
if [ "$AUTOGENERATE_INGRESS" = "true" ]; then
182+
if [ "$AUTOGENERATE_INGRESS" == "true" ]; then
183183

184184
if [ ! -f "$autogeneratedYAMLFile" ]; then
185185
log_debug "Creating file [$autogeneratedYAMLFile]"
@@ -188,7 +188,7 @@ if [ "$AUTOGENERATE_INGRESS" = "true" ]; then
188188
log_debug "File [$autogeneratedYAMLFile] already exists"
189189
fi
190190

191-
if [ "$MON_TLS_PATH_INGRESS" != "true" ] && [ "$ROUTING" = "path" ]; then
191+
if [ "$MON_TLS_PATH_INGRESS" != "true" ] && [ "$ROUTING" == "path" ]; then
192192
log_error "Environment variable MON_TLS_PATH_INGRESS must be set to 'true' when ROUTING='path'."
193193
exit 1
194194
fi
@@ -209,7 +209,7 @@ if [ "$AUTOGENERATE_INGRESS" = "true" ]; then
209209
# Don't assign the variable to itself
210210
ALERTMANAGER_PATH="${ALERTMANAGER_PATH:-alertmanager}"
211211
if [ -z "$ALERTMANAGER_FQDN" ]; then
212-
if [ "$ROUTING" = "host" ]; then
212+
if [ "$ROUTING" == "host" ]; then
213213
ALERTMANAGER_FQDN="$ALERTMANAGER_PATH.$BASE_DOMAIN"
214214
else
215215
ALERTMANAGER_FQDN="$BASE_DOMAIN"
@@ -225,7 +225,7 @@ if [ "$AUTOGENERATE_INGRESS" = "true" ]; then
225225
# Don't assign the variable to itself
226226
GRAFANA_PATH="${GRAFANA_PATH:-grafana}"
227227
if [ -z "$GRAFANA_FQDN" ]; then
228-
if [ "$ROUTING" = "host" ]; then
228+
if [ "$ROUTING" == "host" ]; then
229229
GRAFANA_FQDN="$GRAFANA_PATH.$BASE_DOMAIN"
230230
else
231231
GRAFANA_FQDN="$BASE_DOMAIN"
@@ -240,7 +240,7 @@ if [ "$AUTOGENERATE_INGRESS" = "true" ]; then
240240
# Don't assign the variable to itself
241241
PROMETHEUS_PATH="${PROMETHEUS_PATH:-prometheus}"
242242
if [ -z "$PROMETHEUS_FQDN" ]; then
243-
if [ "$ROUTING" = "host" ]; then
243+
if [ "$ROUTING" == "host" ]; then
244244
PROMETHEUS_FQDN="$PROMETHEUS_PATH.$BASE_DOMAIN"
245245
else
246246
PROMETHEUS_FQDN="$BASE_DOMAIN"
@@ -261,7 +261,7 @@ if [ "$AUTOGENERATE_INGRESS" = "true" ]; then
261261
yq -i '.prometheus.ingress.enabled=env(PROMETHEUS_INGRESS_ENABLE)' "$autogeneratedYAMLFile"
262262

263263
###hosts, paths and fqdn
264-
if [ "$ROUTING" = "host" ]; then
264+
if [ "$ROUTING" == "host" ]; then
265265
yq -i '.alertmanager.ingress.hosts.[0]=env(ALERTMANAGER_FQDN)' "$autogeneratedYAMLFile"
266266
yq -i '.alertmanager.ingress.tls.[0].hosts.[0]=env(ALERTMANAGER_FQDN)' "$autogeneratedYAMLFile"
267267
exturl="https://$ALERTMANAGER_FQDN" yq -i '.alertmanager.alertmanagerSpec.externalUrl=env(exturl)' "$autogeneratedYAMLFile"
@@ -315,7 +315,7 @@ fi
315315

316316
nodePortValuesFile=$TMP_DIR/empty.yaml
317317
PROM_NODEPORT_ENABLE=${PROM_NODEPORT_ENABLE:-false}
318-
if [ "$PROM_NODEPORT_ENABLE" = "true" ]; then
318+
if [ "$PROM_NODEPORT_ENABLE" == "true" ]; then
319319
log_debug "Enabling NodePort access for Prometheus and Alertmanager"
320320
nodePortValuesFile=monitoring/values-prom-nodeport.yaml
321321
fi
@@ -333,7 +333,7 @@ if helm3ReleaseExists "$promRelease" "$MON_NS"; then
333333
log_verbose "Upgrading via Helm ($(date) - timeout 20m)"
334334
else
335335
grafanaPwd="$GRAFANA_ADMIN_PASSWORD"
336-
if [ "$grafanaPwd" = "" ]; then
336+
if [ "$grafanaPwd" == "" ]; then
337337
log_debug "Generating random Grafana admin password"
338338
showPass="true"
339339
grafanaPwd="$(randomPassword)"
@@ -342,14 +342,14 @@ else
342342
fi
343343

344344
# See https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#from-21x-to-22x
345-
if [ "$V4M_CURRENT_VERSION_MAJOR" = "1" ] && [[ $V4M_CURRENT_VERSION_MINOR =~ [0-5] ]]; then
345+
if [ "$V4M_CURRENT_VERSION_MAJOR" == "1" ] && [[ $V4M_CURRENT_VERSION_MINOR =~ [0-5] ]]; then
346346
kubectl delete -n "$MON_NS" --ignore-not-found \
347347
deployments.apps \
348348
-l app.kubernetes.io/instance=v4m-prometheus-operator,app.kubernetes.io/name=kube-state-metrics
349349
fi
350350

351351
TRACING_ENABLE="${TRACING_ENABLE:-false}"
352-
if [ "$TRACING_ENABLE" = "false" ]; then
352+
if [ "$TRACING_ENABLE" == "false" ]; then
353353
tempoDSFile=$TMP_DIR/empty.yaml
354354
else
355355
TEMPO_USER_YAML="${TEMPO_USER_YAML:-$USER_DIR/monitoring/user-values-tempo.yaml}"
@@ -415,7 +415,7 @@ enable_pod_token_automount "$MON_NS" deployment v4m-operator
415415
log_info "Deploying ServiceMonitors and Prometheus rules"
416416
log_verbose "Deploying cluster ServiceMonitors"
417417

418-
if [ "$TRACING_ENABLE" = "true" ]; then
418+
if [ "$TRACING_ENABLE" == "true" ]; then
419419
log_info "Tracing enabled..."
420420

421421
#Generate yaml file with all container-related keys
@@ -448,7 +448,7 @@ if kubectl get ns "$NGINX_NS" 2> /dev/null; then
448448
nginxFound=true
449449
fi
450450

451-
if [ "$nginxFound" = "true" ]; then
451+
if [ "$nginxFound" == "true" ]; then
452452
log_verbose "NGINX found. Deploying podMonitor to [$NGINX_NS] namespace"
453453
kubectl apply -n "$NGINX_NS" -f monitoring/monitors/kube/podMonitor-nginx.yaml 2> /dev/null
454454
fi
@@ -473,7 +473,7 @@ done
473473

474474
# Elasticsearch Datasource for Grafana
475475
LOGGING_DATASOURCE="${LOGGING_DATASOURCE:-false}"
476-
if [ "$LOGGING_DATASOURCE" = "true" ]; then
476+
if [ "$LOGGING_DATASOURCE" == "true" ]; then
477477
set +e
478478
log_debug "Creating the logging data source using the create_logging_datasource script"
479479
monitoring/bin/create_logging_datasource.sh
@@ -544,7 +544,7 @@ log_notice ""
544544
#log_notice "================================================================================"
545545
#log_notice ""
546546

547-
if [ "$showPass" = "true" ]; then
547+
if [ "$showPass" == "true" ]; then
548548
# Find the grafana pod
549549

550550
log_notice " Generated Grafana admin password is: $grafanaPwd"

0 commit comments

Comments
 (0)