
Commit c5877ac

ran shfmt against scripts
1 parent c3b8fff commit c5877ac

7 files changed: +138 −133 lines changed

.github/workflows/ci.yml

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@ jobs:
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           SHELLCHECK_OPTS: -e SC1004 # exclude some shellcheck warnings.
-          SHFMT_OPTS: -s # arguments to shfmt.
+          SHFMT_OPTS: -i 2 -bn -sr -ln bash # shfmt: 2-space indentation (-i 2), braces on same line (-bn), space after redirects (-sr), bash dialect (-ln bash)
         with:
           sh_checker_only_diff: true # only run against files that were updated
           sh_checker_comment: true # include results as github comment
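
The new SHFMT_OPTS can also be run locally before pushing, so the CI check and a contributor's editor agree on formatting. A minimal sketch, assuming shfmt is installed; the globs are illustrative and -d (print a diff) / -w (write in place) are standard shfmt flags:

# Report formatting drift without touching any files (illustrative paths):
shfmt -i 2 -bn -sr -ln bash -d logging/bin/*.sh monitoring/bin/*.sh

# Apply the same formatting the CI check expects:
shfmt -i 2 -bn -sr -ln bash -w logging/bin/*.sh monitoring/bin/*.sh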

logging/bin/deploy_fluentbit_azmonitor.sh

Lines changed: 62 additions & 62 deletions
@@ -17,8 +17,8 @@ log_debug "Script [$this_script] has started [$(date)]"
 FLUENT_BIT_ENABLED=${FLUENT_BIT_ENABLED:-true}
 
 if [ "$FLUENT_BIT_ENABLED" != "true" ]; then
-  log_info "Environment variable [FLUENT_BIT_ENABLED] is not set to 'true'; existing WITHOUT deploying Fluent Bit"
-  exit 0
+  log_info "Environment variable [FLUENT_BIT_ENABLED] is not set to 'true'; existing WITHOUT deploying Fluent Bit"
+  exit 0
 fi
 
 set -e
@@ -39,7 +39,7 @@ helm2ReleaseCheck "fb-$LOG_NS"
 helmRepoAdd fluent https://fluent.github.io/helm-charts
 
 # Confirm namespace exists
-if [ "$(kubectl get ns "$LOG_NS" -o name 2>/dev/null)" == "" ]; then
+if [ "$(kubectl get ns "$LOG_NS" -o name 2> /dev/null)" == "" ]; then
   log_error "The specified namespace [$LOG_NS] does not exist."
   exit 1
 fi
@@ -62,55 +62,55 @@ if [ ! -f "$FB_AZMONITOR_USER_YAML" ]; then
 fi
 
 if [ -f "$USER_DIR/logging/fluent-bit_config.configmap_azmonitor.yaml" ]; then
-  # use copy in USER_DIR
-  FB_CONFIGMAP="$USER_DIR/logging/fluent-bit_config.configmap_azmonitor.yaml"
+  # use copy in USER_DIR
+  FB_CONFIGMAP="$USER_DIR/logging/fluent-bit_config.configmap_azmonitor.yaml"
 else
-  # use copy in repo
-  FB_CONFIGMAP="logging/fb/fluent-bit_config.configmap_azmonitor.yaml"
+  # use copy in repo
+  FB_CONFIGMAP="logging/fb/fluent-bit_config.configmap_azmonitor.yaml"
 fi
 log_info "Using FB ConfigMap: $FB_CONFIGMAP"
 
 # Check/Create Connection Info Secret
-if [ "$(kubectl -n "$LOG_NS" get secret connection-info-azmonitor -o name 2>/dev/null)" == "" ]; then
-
-  export AZMONITOR_CUSTOMER_ID="${AZMONITOR_CUSTOMER_ID:-NotProvided}"
-  export AZMONITOR_SHARED_KEY="${AZMONITOR_SHARED_KEY:-NotProvided}"
-
-  if [ "$AZMONITOR_CUSTOMER_ID" != "NotProvided" ] && [ "$AZMONITOR_SHARED_KEY" != "NotProvided" ]; then
-    log_info "Creating secret [connection-info-azmonitor] in [$LOG_NS] namespace to hold Azure connection information."
-    kubectl -n "$LOG_NS" create secret generic connection-info-azmonitor --from-literal=customer_id="$AZMONITOR_CUSTOMER_ID" --from-literal=shared_key="$AZMONITOR_SHARED_KEY"
-  else
-    log_error "Unable to create secret [$LOG_NS/connection-info-azmonitor] because missing required information: [AZMONITOR_CUSTOMER_ID: $AZMONITOR_CUSTOMER_ID ; AZMONITOR_SHARED_KEY: $AZMONITOR_SHARED_KEY]."
-    log_error "You must provide this information via environment variables or create the secret [connection-info-azmonitor] before running this script."
-    exit 1
-  fi
+if [ "$(kubectl -n "$LOG_NS" get secret connection-info-azmonitor -o name 2> /dev/null)" == "" ]; then
+
+  export AZMONITOR_CUSTOMER_ID="${AZMONITOR_CUSTOMER_ID:-NotProvided}"
+  export AZMONITOR_SHARED_KEY="${AZMONITOR_SHARED_KEY:-NotProvided}"
+
+  if [ "$AZMONITOR_CUSTOMER_ID" != "NotProvided" ] && [ "$AZMONITOR_SHARED_KEY" != "NotProvided" ]; then
+    log_info "Creating secret [connection-info-azmonitor] in [$LOG_NS] namespace to hold Azure connection information."
+    kubectl -n "$LOG_NS" create secret generic connection-info-azmonitor --from-literal=customer_id="$AZMONITOR_CUSTOMER_ID" --from-literal=shared_key="$AZMONITOR_SHARED_KEY"
+  else
+    log_error "Unable to create secret [$LOG_NS/connection-info-azmonitor] because missing required information: [AZMONITOR_CUSTOMER_ID: $AZMONITOR_CUSTOMER_ID ; AZMONITOR_SHARED_KEY: $AZMONITOR_SHARED_KEY]."
+    log_error "You must provide this information via environment variables or create the secret [connection-info-azmonitor] before running this script."
+    exit 1
+  fi
 else
-  log_info "Obtaining connection information from existing secret [$LOG_NS/connection-info-azmonitor]"
-  # Fix SC2155: Declare and assign separately
-  AZMONITOR_CUSTOMER_ID=$(kubectl -n "$LOG_NS" get secret connection-info-azmonitor -o=jsonpath="{.data.customer_id}" | base64 --decode)
-  export AZMONITOR_CUSTOMER_ID
-  AZMONITOR_SHARED_KEY=$(kubectl -n "$LOG_NS" get secret connection-info-azmonitor -o=jsonpath="{.data.shared_key}" | base64 --decode)
-  export AZMONITOR_SHARED_KEY
+  log_info "Obtaining connection information from existing secret [$LOG_NS/connection-info-azmonitor]"
+  # Fix SC2155: Declare and assign separately
+  AZMONITOR_CUSTOMER_ID=$(kubectl -n "$LOG_NS" get secret connection-info-azmonitor -o=jsonpath="{.data.customer_id}" | base64 --decode)
+  export AZMONITOR_CUSTOMER_ID
+  AZMONITOR_SHARED_KEY=$(kubectl -n "$LOG_NS" get secret connection-info-azmonitor -o=jsonpath="{.data.shared_key}" | base64 --decode)
+  export AZMONITOR_SHARED_KEY
 fi
 
 # Check for an existing Helm release of stable/fluent-bit
 if helm3ReleaseExists fbaz "$LOG_NS"; then
-  log_info "Removing an existing release of deprecated stable/fluent-bit Helm chart from from the [$LOG_NS] namespace [$(date)]"
-  helm "$helmDebug" delete -n "$LOG_NS" fbaz
-
-  # Fix SC2155: Declare and assign separately
-  num_service_monitors_v2=$(kubectl get servicemonitors -A | grep -c fluent-bit-v2 || true)
-  if [ "$num_service_monitors_v2" -ge 1 ]; then
-    log_debug "Updated serviceMonitor [fluent-bit-v2] appears to be deployed."
-  else
-    num_service_monitors=$(kubectl get servicemonitors -A | grep -c fluent-bit || true)
-    if [ "$num_service_monitors" -ge 1 ]; then
-      log_warn "You appear to have an obsolete service monitor in place for monitoring Fluent Bit."
-      log_warn "Run monitoring/bin/deploy_monitoring_cluster.sh to deploy the current set of service monitors."
-    fi
-  fi
+  log_info "Removing an existing release of deprecated stable/fluent-bit Helm chart from from the [$LOG_NS] namespace [$(date)]"
+  helm "$helmDebug" delete -n "$LOG_NS" fbaz
+
+  # Fix SC2155: Declare and assign separately
+  num_service_monitors_v2=$(kubectl get servicemonitors -A | grep -c fluent-bit-v2 || true)
+  if [ "$num_service_monitors_v2" -ge 1 ]; then
+    log_debug "Updated serviceMonitor [fluent-bit-v2] appears to be deployed."
+  else
+    num_service_monitors=$(kubectl get servicemonitors -A | grep -c fluent-bit || true)
+    if [ "$num_service_monitors" -ge 1 ]; then
+      log_warn "You appear to have an obsolete service monitor in place for monitoring Fluent Bit."
+      log_warn "Run monitoring/bin/deploy_monitoring_cluster.sh to deploy the current set of service monitors."
+    fi
+  fi
 else
-  log_debug "No existing release of the deprecated stable/fluent-bit Helm chart was found"
+  log_debug "No existing release of the deprecated stable/fluent-bit Helm chart was found"
 fi
 
 # Multiline parser setup
@@ -147,39 +147,39 @@ fi
 # Check for Kubernetes container runtime log format info
 KUBERNETES_RUNTIME_LOGFMT="${KUBERNETES_RUNTIME_LOGFMT:-}"
 if [ -z "$KUBERNETES_RUNTIME_LOGFMT" ]; then
-  # Fix SC2155: Declare and assign separately
-  somenode=$(kubectl get nodes | awk 'NR==2 { print $1 }')
-  runtime=$(kubectl get node "$somenode" -o "jsonpath={.status.nodeInfo.containerRuntimeVersion}" | awk -F: '{print $1}')
-  log_debug "Kubernetes container runtime [$runtime] found on node [$somenode]"
-  case $runtime in
-  docker)
-    KUBERNETES_RUNTIME_LOGFMT="docker"
-    ;;
-  containerd|cri-o)
-    KUBERNETES_RUNTIME_LOGFMT="criwithlog"
-    ;;
-  *)
-    log_warn "Unrecognized Kubernetes container runtime [$runtime]; using default parser"
-    KUBERNETES_RUNTIME_LOGFMT="docker"
-    ;;
-  esac
+  # Fix SC2155: Declare and assign separately
+  somenode=$(kubectl get nodes | awk 'NR==2 { print $1 }')
+  runtime=$(kubectl get node "$somenode" -o "jsonpath={.status.nodeInfo.containerRuntimeVersion}" | awk -F: '{print $1}')
+  log_debug "Kubernetes container runtime [$runtime] found on node [$somenode]"
+  case $runtime in
+  docker)
+    KUBERNETES_RUNTIME_LOGFMT="docker"
+    ;;
+  containerd | cri-o)
+    KUBERNETES_RUNTIME_LOGFMT="criwithlog"
+    ;;
+  *)
+    log_warn "Unrecognized Kubernetes container runtime [$runtime]; using default parser"
+    KUBERNETES_RUNTIME_LOGFMT="docker"
+    ;;
+  esac
 fi
 
 MON_NS="${MON_NS:-monitoring}"
 
 # Create ConfigMap containing Kubernetes container runtime log format
 kubectl -n "$LOG_NS" delete configmap fbaz-env-vars --ignore-not-found
 kubectl -n "$LOG_NS" create configmap fbaz-env-vars \
-  --from-literal=KUBERNETES_RUNTIME_LOGFMT="$KUBERNETES_RUNTIME_LOGFMT" \
-  --from-literal=LOG_MULTILINE_PARSER="${LOG_MULTILINE_PARSER}" \
-  --from-literal=MON_NS="${MON_NS}"
+  --from-literal=KUBERNETES_RUNTIME_LOGFMT="$KUBERNETES_RUNTIME_LOGFMT" \
+  --from-literal=LOG_MULTILINE_PARSER="${LOG_MULTILINE_PARSER}" \
+  --from-literal=MON_NS="${MON_NS}"
 
 kubectl -n "$LOG_NS" label configmap fbaz-env-vars managed-by=v4m-es-script
 
 # Check to see if we are upgrading from earlier version requiring root access
 if [ "$(kubectl -n "$LOG_NS" get configmap fbaz-dbmigrate-script -o name --ignore-not-found)" != "configmap/fbaz-dbmigrate-script" ]; then
-  log_debug "Removing FB pods (if they exist) to allow migration."
-  kubectl -n "$LOG_NS" delete daemonset v4m-fbaz --ignore-not-found
+  log_debug "Removing FB pods (if they exist) to allow migration."
+  kubectl -n "$LOG_NS" delete daemonset v4m-fbaz --ignore-not-found
 fi
 
 # Create ConfigMap containing Fluent Bit database migration script
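
The repeated "# Fix SC2155" comments above refer to ShellCheck's SC2155 warning ("Declare and assign separately to avoid masking return values"): when export and a command substitution share one statement, $? reports the status of export itself, not of the command inside the substitution. A minimal sketch of the difference, using a hypothetical secret name; under set -e (which these scripts enable) only the split form aborts when the command fails:

# Combined form: export always succeeds, so a failing kubectl goes unnoticed.
export CUSTOMER_ID="$(kubectl get secret some-missing-secret -o name)"
echo "$?"   # prints 0 even if kubectl failed

# Split form, as the scripts now do: the assignment's status is kubectl's status.
CUSTOMER_ID="$(kubectl get secret some-missing-secret -o name)"
echo "$?"   # non-zero if kubectl failed
export CUSTOMER_ID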

logging/bin/deploy_fluentbit_k8sevents_opensearch.sh

Lines changed: 18 additions & 19 deletions
@@ -17,8 +17,8 @@ log_debug "Script [$this_script] has started [$(date)]"
 FLUENT_BIT_EVENTS_ENABLED=${FLUENT_BIT_EVENTS_ENABLED:-true}
 
 if [ "$FLUENT_BIT_EVENTS_ENABLED" != "true" ]; then
-  log_info "Environment variable [FLUENT_BIT_EVENTS_ENABLED] is not set to 'true'; exiting WITHOUT deploying Fluent Bit deployment"
-  exit 0
+  log_info "Environment variable [FLUENT_BIT_EVENTS_ENABLED] is not set to 'true'; exiting WITHOUT deploying Fluent Bit deployment"
+  exit 0
 fi
 
 set -e
@@ -33,30 +33,32 @@ log_info "Deploying Fluent Bit for collecting Kubernetes Events..."
 # Remove an existing Event Routher deployment?
 REMOVE_EVENTROUTER=${REMOVE_EVENTROUTER:-true}
 if [ "$REMOVE_EVENTROUTER" == "true" ]; then
-  if [ "$(kubectl get deployment -n "$LOG_NS" -o name -l app=eventrouter 2>/dev/null)" == "" ]; then
-    log_debug "No existing instance of Event Router found in namespace [$LOG_NS]."
-  else
-    log_debug "Removing an existing instance of Event Router found in namespace [$LOG_NS]."
-    logging/bin/remove_eventrouter.sh
-  fi
+  if [ "$(kubectl get deployment -n "$LOG_NS" -o name -l app=eventrouter 2> /dev/null)" == "" ]; then
+    log_debug "No existing instance of Event Router found in namespace [$LOG_NS]."
+  else
+    log_debug "Removing an existing instance of Event Router found in namespace [$LOG_NS]."
+    logging/bin/remove_eventrouter.sh
+  fi
 fi
 
-
 # check for pre-reqs
 # Confirm namespace exists
-if [ "$(kubectl get ns "$LOG_NS" -o name 2>/dev/null)" == "" ]; then
+if [ "$(kubectl get ns "$LOG_NS" -o name 2> /dev/null)" == "" ]; then
   log_error "Namespace [$LOG_NS] does NOT exist."
   exit 1
 fi
 
 # get credentials
-if [ "$(kubectl -n "$LOG_NS" get secret internal-user-logcollector -o name 2>/dev/null)" == "" ]; then
-  export ES_LOGCOLLECTOR_PASSWD=${ES_LOGCOLLECTOR_PASSWD}
-  create_user_secret internal-user-logcollector logcollector "$ES_LOGCOLLECTOR_PASSWD" managed-by=v4m-es-script
+if [ "$(kubectl -n "$LOG_NS" get secret internal-user-logcollector -o name 2> /dev/null)" == "" ]; then
+  export ES_LOGCOLLECTOR_PASSWD=${ES_LOGCOLLECTOR_PASSWD}
+  create_user_secret internal-user-logcollector logcollector "$ES_LOGCOLLECTOR_PASSWD" managed-by=v4m-es-script
 else
-  get_credentials_from_secret logcollector
-  rc=$?
-  if [ "$rc" != "0" ] ;then log_debug "RC=$rc"; exit "$rc";fi
+  get_credentials_from_secret logcollector
+  rc=$?
+  if [ "$rc" != "0" ]; then
+    log_debug "RC=$rc"
+    exit "$rc"
+  fi
 fi
 
 HELM_DEBUG="${HELM_DEBUG:-false}"
@@ -65,7 +67,6 @@ if [ "$HELM_DEBUG" == "true" ]; then
   helmDebug="--debug"
 fi
 
-
 helmRepoAdd fluent https://fluent.github.io/helm-charts
 
 #Generate yaml file with all container-related keys
@@ -78,7 +79,6 @@ if [ ! -f "$FB_EVENTS_USER_YAML" ]; then
   FB_EVENTS_USER_YAML=$TMP_DIR/empty.yaml
 fi
 
-
 # Point to OpenShift response file or dummy as appropriate
 openshiftValuesFile="$TMP_DIR/empty.yaml"
 if [ "$OPENSHIFT_CLUSTER" == "true" ]; then
@@ -88,7 +88,6 @@ else
   log_debug "Fluent Bit is NOT being deployed on OpenShift cluster"
 fi
 
-
 ## Get Helm Chart Name
 log_debug "Fluent Bit Helm Chart: repo [$FLUENTBIT_HELM_CHART_REPO] name [$FLUENTBIT_HELM_CHART_NAME] version [$FLUENTBIT_HELM_CHART_VERSION]"
 chart2install="$(get_helmchart_reference "$FLUENTBIT_HELM_CHART_REPO" "$FLUENTBIT_HELM_CHART_NAME" "$FLUENTBIT_HELM_CHART_VERSION")"
