
Commit 80ada96

Merge pull request kubernetes#88349 from BenTheElder/shell-dump
fix cluster/log-dump/log-dump.sh shellcheck failures
2 parents eb1fa66 + 7427016, commit 80ada96

File tree: 2 files changed, +40 -37 lines changed

cluster/log-dump/log-dump.sh

Lines changed: 40 additions & 36 deletions
@@ -67,7 +67,7 @@ readonly max_dump_processes=25
 function setup() {
   KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
   if [[ -z "${use_custom_instance_list}" ]]; then
-    : ${KUBE_CONFIG_FILE:="config-test.sh"}
+    : "${KUBE_CONFIG_FILE:="config-test.sh"}"
     echo "Sourcing kube-util.sh"
     source "${KUBE_ROOT}/cluster/kube-util.sh"
     echo "Detecting project"
@@ -89,7 +89,7 @@ function setup() {
 }
 
 function log-dump-ssh() {
-  if [[ "${gcloud_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
+  if [[ "${gcloud_supported_providers}" =~ ${KUBERNETES_PROVIDER} ]]; then
     ssh-to-node "$@"
     return
   fi
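
Dropping the quotes on the right-hand side of `=~` follows shellcheck SC2076: a quoted right-hand side is matched as a literal string, while the unquoted form is treated as an (unanchored) extended regex. A standalone sketch with illustrative values:

    gcloud_supported_providers="gce gke kubemark"   # illustrative value
    KUBERNETES_PROVIDER="gce"
    # Unquoted right-hand side: the value is matched as an (unanchored) extended regex.
    # A quoted right-hand side would be matched as a literal substring instead.
    if [[ "${gcloud_supported_providers}" =~ ${KUBERNETES_PROVIDER} ]]; then
      echo "provider is in the supported list"
    fi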
@@ -106,7 +106,7 @@ function log-dump-ssh() {
 function copy-logs-from-node() {
   local -r node="${1}"
   local -r dir="${2}"
-  local files=( ${3} )
+  while IFS= read -r line; do files+=("$line"); done <<< "$3"
   # Append "*"
   # The * at the end is needed to also copy rotated logs (which happens
   # in large clusters and long runs).
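
The `while IFS= read -r` loop replaces unquoted word splitting into an array (shellcheck SC2206); note that with a here-string it appends one element per input line. A standalone sketch with an illustrative input:

    input=$'kube-apiserver.log\nkube-scheduler.log'   # illustrative, newline-separated
    files=()
    # Append one array element per input line; read -r keeps backslashes literal,
    # and an empty IFS preserves leading/trailing whitespace in each line.
    while IFS= read -r line; do files+=("$line"); done <<< "${input}"
    printf 'file: %s\n' "${files[@]}"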
@@ -116,12 +116,13 @@ function copy-logs-from-node() {
   # Comma delimit (even the singleton, or scp does the wrong thing), surround by braces.
   local -r scp_files="{$(printf "%s," "${files[@]}")}"
 
-  if [[ "${gcloud_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
+  if [[ "${gcloud_supported_providers}" =~ ${KUBERNETES_PROVIDER} ]]; then
     # get-serial-port-output lets you ask for ports 1-4, but currently (11/21/2016) only port 1 contains useful information
     gcloud compute instances get-serial-port-output --project "${PROJECT}" --zone "${ZONE}" --port 1 "${node}" > "${dir}/serial-1.log" || true
     gcloud compute scp --recurse --project "${PROJECT}" --zone "${ZONE}" "${node}:${scp_files}" "${dir}" > /dev/null || true
   elif [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
-    local ip=$(get_ssh_hostname "${node}")
+    local ip
+    ip=$(get_ssh_hostname "${node}")
     scp -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "${SSH_USER}@${ip}:${scp_files}" "${dir}" > /dev/null || true
   elif [[ -n "${use_custom_instance_list}" ]]; then
     scp -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${LOG_DUMP_SSH_KEY}" "${LOG_DUMP_SSH_USER}@${node}:${scp_files}" "${dir}" > /dev/null || true
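
Splitting `local ip=$(...)` into a declaration plus an assignment is the usual fix for shellcheck SC2155: when the two share a line, the exit status of the command substitution is masked by `local`. A minimal sketch; the helper below is a stand-in, not the real function:

    set -o errexit
    get_ssh_hostname() { echo "203.0.113.10"; }   # stand-in for the real helper

    fetch_ip() {
      # 'local ip=$(cmd)' would return the exit status of 'local', hiding a failure of cmd.
      local ip
      ip=$(get_ssh_hostname "node-1")   # a non-zero status here is no longer masked
      echo "copying from ${ip}"
    }
    fetch_ip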
@@ -138,26 +139,27 @@ function copy-logs-from-node() {
 function save-logs() {
   local -r node_name="${1}"
   local -r dir="${2}"
-  local files="${3}"
+  local save_files="${3}"
   local opt_systemd_services="${4:-""}"
   local on_master="${5:-"false"}"
 
-  files="${files} ${extra_log_files}"
+  save_files="${save_files} ${extra_log_files}"
   if [[ -n "${use_custom_instance_list}" ]]; then
     if [[ -n "${LOG_DUMP_SAVE_LOGS:-}" ]]; then
-      files="${files} ${LOG_DUMP_SAVE_LOGS:-}"
+      save_files="${save_files} ${LOG_DUMP_SAVE_LOGS:-}"
     fi
   else
     case "${KUBERNETES_PROVIDER}" in
       gce|gke)
-        files="${files} ${gce_logfiles}"
+        save_files="${save_files} ${gce_logfiles}"
         ;;
       aws)
-        files="${files} ${aws_logfiles}"
+        save_files="${save_files} ${aws_logfiles}"
         ;;
     esac
   fi
-  local -r services=( ${systemd_services} ${opt_systemd_services} ${extra_systemd_services} )
+
+  read -r -a services <<< "${systemd_services} ${opt_systemd_services} ${extra_systemd_services}"
 
   if log-dump-ssh "${node_name}" "command -v journalctl" &> /dev/null; then
     if [[ "${on_master}" == "true" ]]; then
@@ -177,7 +179,7 @@ function save-logs() {
       log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise" > "${dir}/systemd.log" || true
     fi
   else
-    files="${kern_logfile} ${files} ${initd_logfiles} ${supervisord_logfiles}"
+    save_files="${kern_logfile} ${save_files} ${initd_logfiles} ${supervisord_logfiles}"
   fi
 
   # Try dumping coverage profiles, if it looks like coverage is enabled in the first place.
@@ -198,8 +200,8 @@ function save-logs() {
   echo "Changing logfiles to be world-readable for download"
   log-dump-ssh "${node_name}" "sudo chmod -R a+r /var/log" || true
 
-  echo "Copying '${files}' from ${node_name}"
-  copy-logs-from-node "${node_name}" "${dir}" "${files}"
+  echo "Copying '${save_files}' from ${node_name}"
+  copy-logs-from-node "${node_name}" "${dir}" "${save_files}"
 }
 
 # Saves a copy of the Windows Docker event log to ${WINDOWS_LOGS_DIR}\docker.log
@@ -245,8 +247,9 @@ function save-windows-logs-via-diagnostics-tool() {
   local node="${1}"
   local dest_dir="${2}"
 
-  gcloud compute instances add-metadata ${node} --metadata enable-diagnostics=true --project=${PROJECT} --zone=${ZONE}
-  local logs_archive_in_gcs=$(gcloud alpha compute diagnose export-logs ${node} --zone=${ZONE} --project=${PROJECT} | tail -n 1)
+  gcloud compute instances add-metadata "${node}" --metadata enable-diagnostics=true "--project=${PROJECT}" "--zone=${ZONE}"
+  local logs_archive_in_gcs
+  logs_archive_in_gcs=$(gcloud alpha compute diagnose export-logs "${node}" "--zone=${ZONE}" "--project=${PROJECT}" | tail -n 1)
   local temp_local_path="${node}.zip"
   for retry in {1..20}; do
     if gsutil mv "${logs_archive_in_gcs}" "${temp_local_path}" > /dev/null 2>&1; then
@@ -258,8 +261,8 @@ function save-windows-logs-via-diagnostics-tool() {
   done
 
   if [[ -f "${temp_local_path}" ]]; then
-    unzip ${temp_local_path} -d "${dest_dir}" > /dev/null
-    rm -f ${temp_local_path}
+    unzip "${temp_local_path}" -d "${dest_dir}" > /dev/null
+    rm -f "${temp_local_path}"
   fi
 }

@@ -272,14 +275,14 @@ function save-windows-logs-via-ssh() {
   export-windows-docker-images-list "${node}"
 
   local remote_files=()
-  for file in ${windows_node_logfiles[@]}; do
+  for file in "${windows_node_logfiles[@]}"; do
     remote_files+=( "${WINDOWS_LOGS_DIR}\\${file}" )
   done
   remote_files+=( "${windows_node_otherfiles[@]}" )
 
   # TODO(pjh, yujuhong): handle rotated logs and copying multiple files at the
   # same time.
-  for remote_file in ${remote_files[@]}; do
+  for remote_file in "${remote_files[@]}"; do
     # Retry up to 3 times to allow ssh keys to be properly propagated and
     # stored.
     for retry in {1..3}; do
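
Quoting the array expansions in the two loops above (shellcheck SC2068) keeps each element a single word even if it contains spaces or glob characters. Sketch with illustrative file names:

    windows_node_logfiles=("kubelet.log" "kube-proxy.log" "csi proxy.log")   # illustrative names
    # Quoted "${arr[@]}" yields exactly one word per element, even with spaces;
    # unquoted ${arr[@]} would re-split and glob-expand every element.
    for file in "${windows_node_logfiles[@]}"; do
      echo "will fetch: ${file}"
    done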
@@ -301,7 +304,7 @@ function save-logs-windows() {
   local -r node="${1}"
   local -r dest_dir="${2}"
 
-  if [[ ! "${gcloud_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
+  if [[ ! "${gcloud_supported_providers}" =~ ${KUBERNETES_PROVIDER} ]]; then
     echo "Not saving logs for ${node}, Windows log dumping requires gcloud support"
     return
   fi
@@ -323,14 +326,14 @@ function run-in-docker-container() {
   local node_name="$1"
   local container="$2"
   shift 2
-  log-dump-ssh "${node_name}" "docker exec \"\$(docker ps -f label=io.kubernetes.container.name=${container} --format \"{{.ID}}\")\" $@"
+  log-dump-ssh "${node_name}" "docker exec \"\$(docker ps -f label=io.kubernetes.container.name=${container} --format \"{{.ID}}\")\" $*"
 }
 
 function dump_masters() {
   local master_names
   if [[ -n "${use_custom_instance_list}" ]]; then
-    master_names=( $(log_dump_custom_get_instances master) )
-  elif [[ ! "${master_ssh_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
+    while IFS='' read -r line; do master_names+=("$line"); done < <(log_dump_custom_get_instances master)
+  elif [[ ! "${master_ssh_supported_providers}" =~ ${KUBERNETES_PROVIDER} ]]; then
     echo "Master SSH not supported for ${KUBERNETES_PROVIDER}"
     return
   elif [[ -n "${KUBEMARK_MASTER_NAME:-}" ]]; then
@@ -381,8 +384,8 @@ function dump_nodes() {
     node_names=( "$@" )
   elif [[ -n "${use_custom_instance_list}" ]]; then
     echo "Dumping logs for nodes provided by log_dump_custom_get_instances() function"
-    node_names=( $(log_dump_custom_get_instances node) )
-  elif [[ ! "${node_ssh_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
+    while IFS='' read -r line; do node_names+=("$line"); done < <(log_dump_custom_get_instances node)
+  elif [[ ! "${node_ssh_supported_providers}" =~ ${KUBERNETES_PROVIDER} ]]; then
     echo "Node SSH not supported for ${KUBERNETES_PROVIDER}"
     return
   else
@@ -409,7 +412,7 @@ function dump_nodes() {
   linux_nodes_selected_for_logs=()
   if [[ -n "${LOGDUMP_ONLY_N_RANDOM_NODES:-}" ]]; then
     # We randomly choose 'LOGDUMP_ONLY_N_RANDOM_NODES' many nodes for fetching logs.
-    for index in `shuf -i 0-$(( ${#node_names[*]} - 1 )) -n ${LOGDUMP_ONLY_N_RANDOM_NODES}`
+    for index in $(shuf -i 0-$(( ${#node_names[*]} - 1 )) -n "${LOGDUMP_ONLY_N_RANDOM_NODES}")
     do
       linux_nodes_selected_for_logs+=("${node_names[$index]}")
     done
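
The hunk above also swaps legacy backticks for `$( ... )` (shellcheck SC2006) and quotes the `-n` argument to `shuf`. A standalone sketch of the same random-selection pattern with illustrative node names:

    node_names=("node-a" "node-b" "node-c" "node-d")   # illustrative
    LOGDUMP_ONLY_N_RANDOM_NODES=2
    selected=()
    # shuf prints N distinct indices between 0 and len-1, one per line;
    # $( ... ) replaces the backtick form and nests cleanly.
    for index in $(shuf -i 0-$(( ${#node_names[*]} - 1 )) -n "${LOGDUMP_ONLY_N_RANDOM_NODES}"); do
      selected+=("${node_names[$index]}")
    done
    printf 'selected: %s\n' "${selected[@]}"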
@@ -475,7 +478,7 @@ function find_non_logexported_nodes() {
   echo "Successfully listed marker files for successful nodes"
   NON_LOGEXPORTED_NODES=()
   for node in "${NODE_NAMES[@]}"; do
-    if [[ ! "${succeeded_nodes}" =~ "${node}" ]]; then
+    if [[ ! "${succeeded_nodes}" =~ ${node} ]]; then
       NON_LOGEXPORTED_NODES+=("${node}")
     fi
   done
@@ -486,7 +489,8 @@ function find_non_logexported_nodes() {
 function dump_nodes_with_logexporter() {
   if [[ -n "${use_custom_instance_list}" ]]; then
     echo "Dumping logs for nodes provided by log_dump_custom_get_instances() function"
-    NODE_NAMES=( $(log_dump_custom_get_instances node) )
+    NODE_NAMES=()
+    while IFS='' read -r line; do NODE_NAMES+=("$line"); done < <(log_dump_custom_get_instances node)
   else
     echo "Detecting nodes in the cluster"
     detect-node-names &> /dev/null
@@ -498,7 +502,7 @@ function dump_nodes_with_logexporter() {
   fi
 
   # Obtain parameters required by logexporter.
-  local -r service_account_credentials="$(cat ${GOOGLE_APPLICATION_CREDENTIALS} | base64 | tr -d '\n')"
+  local -r service_account_credentials="$(base64 <<< "${GOOGLE_APPLICATION_CREDENTIALS}" | tr -d '\n')"
   local -r cloud_provider="${KUBERNETES_PROVIDER}"
   local -r enable_hollow_node_logs="${ENABLE_HOLLOW_NODE_LOGS:-false}"
   local -r logexport_sleep_seconds="$(( 90 + NUM_NODES / 3 ))"
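
For reference on the here-string used above: `<<<` feeds the expanded string itself to the command's stdin, while `< file` (or `cat file |`) feeds the file's contents, so the two encode different inputs when the variable holds a path. A small sketch with an illustrative path:

    creds_path="/tmp/example-creds.json"                 # illustrative path
    printf '{"type":"service_account"}' > "${creds_path}"
    # Here-string: encodes the string held by the variable (here, the path itself).
    base64 <<< "${creds_path}" | tr -d '\n'; echo
    # File redirection: encodes the contents of the file the path points at.
    base64 < "${creds_path}" | tr -d '\n'; echo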
@@ -559,7 +563,6 @@ function dump_nodes_with_logexporter() {
   done; wait)
 
   # List registry of marker files (of nodes whose logexporter succeeded) from GCS.
-  local nodes_succeeded
   for retry in {1..10}; do
     if find_non_logexported_nodes; then
       break
@@ -589,28 +592,29 @@
   "${KUBECTL}" get pods --namespace "${logexporter_namespace}" || true
   "${KUBECTL}" delete namespace "${logexporter_namespace}" || true
   if [[ "${#failed_nodes[@]}" != 0 ]]; then
-    echo -e "Dumping logs through SSH for the following nodes:\n${failed_nodes[@]}"
+    echo -e "Dumping logs through SSH for the following nodes:\n${failed_nodes[*]}"
     dump_nodes "${failed_nodes[@]}"
   fi
 }
 
 function detect_node_failures() {
-  if ! [[ "${gcloud_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
+  if ! [[ "${gcloud_supported_providers}" =~ ${KUBERNETES_PROVIDER} ]]; then
     return
   fi
 
   detect-node-names
   if [[ "${KUBERNETES_PROVIDER}" == "gce" ]]; then
-    local all_instance_groups=(${INSTANCE_GROUPS[@]} ${WINDOWS_INSTANCE_GROUPS[@]})
+    local all_instance_groups=("${INSTANCE_GROUPS[@]}" "${WINDOWS_INSTANCE_GROUPS[@]}")
   else
-    local all_instance_groups=(${INSTANCE_GROUPS[@]})
+    local all_instance_groups=("${INSTANCE_GROUPS[@]}")
   fi
 
   if [ -z "${all_instance_groups:-}" ]; then
     return
   fi
   for group in "${all_instance_groups[@]}"; do
-    local creation_timestamp=$(gcloud compute instance-groups managed describe \
+    local creation_timestamp
+    creation_timestamp=$(gcloud compute instance-groups managed describe \
       "${group}" \
       --project "${PROJECT}" \
       --zone "${ZONE}" \

hack/.shellcheck_failures

Lines changed: 0 additions & 1 deletion
@@ -1,4 +1,3 @@
 ./cluster/gce/gci/configure.sh
 ./cluster/gce/gci/master-helper.sh
 ./cluster/gce/util.sh
-./cluster/log-dump/log-dump.sh
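
With the script removed from hack/.shellcheck_failures, the shellcheck verifier no longer skips it, so new warnings will fail verification. The checks can be reproduced locally; the commands below assume the standard hack/verify-shellcheck.sh entry point and a locally installed shellcheck:

    # From the repository root:
    ./hack/verify-shellcheck.sh
    # Or lint just this script directly:
    shellcheck ./cluster/log-dump/log-dump.sh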
