@@ -67,7 +67,7 @@ readonly max_dump_processes=25
 function setup() {
   KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
   if [[ -z "${use_custom_instance_list}" ]]; then
-    : ${KUBE_CONFIG_FILE:="config-test.sh"}
+    : "${KUBE_CONFIG_FILE:="config-test.sh"}"
     echo "Sourcing kube-util.sh"
     source "${KUBE_ROOT}/cluster/kube-util.sh"
     echo "Detecting project"
@@ -89,7 +89,7 @@ function setup() {
 }

 function log-dump-ssh() {
-  if [[ "${gcloud_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
+  if [[ "${gcloud_supported_providers}" =~ ${KUBERNETES_PROVIDER} ]]; then
     ssh-to-node "$@"
     return
   fi
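
Note: an unquoted right-hand side of =~ is treated as a regex, while a quoted
one matches literally; ShellCheck SC2076 prefers the unquoted form. For plain
provider names the two behave the same, but they diverge once the variable
contains regex metacharacters:

    providers="gce gke aws"
    p="gce"
    [[ ${providers} =~ ${p} ]]   && echo "matches"   # regex match
    [[ ${providers} =~ "${p}" ]] && echo "matches"   # literal substring match
    p="g.e"
    [[ ${providers} =~ ${p} ]]   && echo "matches"   # '.' acts as a wildcard
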
@@ -106,7 +106,7 @@ function log-dump-ssh() {
 function copy-logs-from-node() {
   local -r node="${1}"
   local -r dir="${2}"
-  local files=( ${3} )
+  while IFS= read -r line; do files+=("$line"); done <<< "$3"
   # Append "*"
   # The * at the end is needed to also copy rotated logs (which happens
   # in large clusters and long runs).
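
Note: the read loop replaces the unquoted array assignment ShellCheck SC2206
warns about (unquoted ${3} is glob-expanded as well as word-split). Two
caveats worth flagging, since they depend on the callers: <<< "$3" splits on
newlines, so a space-separated list arrives as a single element, and files is
no longer declared local. A variant that splits on whitespace and keeps local
scope (assuming callers pass a space-separated list, as save-logs appears to)
would be:

    local files=()
    read -r -a files <<< "$3"
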
@@ -116,12 +116,13 @@ function copy-logs-from-node() {
   # Comma delimit (even the singleton, or scp does the wrong thing), surround by braces.
   local -r scp_files="{$(printf "%s," "${files[@]}")}"

-  if [[ "${gcloud_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
+  if [[ "${gcloud_supported_providers}" =~ ${KUBERNETES_PROVIDER} ]]; then
     # get-serial-port-output lets you ask for ports 1-4, but currently (11/21/2016) only port 1 contains useful information
     gcloud compute instances get-serial-port-output --project "${PROJECT}" --zone "${ZONE}" --port 1 "${node}" > "${dir}/serial-1.log" || true
     gcloud compute scp --recurse --project "${PROJECT}" --zone "${ZONE}" "${node}:${scp_files}" "${dir}" > /dev/null || true
   elif [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
-    local ip=$(get_ssh_hostname "${node}")
+    local ip
+    ip=$(get_ssh_hostname "${node}")
     scp -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "${SSH_USER}@${ip}:${scp_files}" "${dir}" > /dev/null || true
   elif [[ -n "${use_custom_instance_list}" ]]; then
     scp -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${LOG_DUMP_SSH_KEY}" "${LOG_DUMP_SSH_USER}@${node}:${scp_files}" "${dir}" > /dev/null || true
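
Note: declaring ip separately before assigning it addresses ShellCheck
SC2155: local var=$(cmd) always returns the status of local itself, hiding a
failure of cmd. A minimal sketch (inside a function, since local requires
one):

    demo() {
      local ip=$(false); echo "$?"   # prints 0 - the status of 'local'
      local ip
      ip=$(false);       echo "$?"   # prints 1 - the command's real status
    }
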
@@ -138,26 +139,27 @@ function copy-logs-from-node() {
 function save-logs() {
   local -r node_name="${1}"
   local -r dir="${2}"
-  local files="${3}"
+  local save_files="${3}"
   local opt_systemd_services="${4:-""}"
   local on_master="${5:-"false"}"

-  files="${files} ${extra_log_files}"
+  save_files="${save_files} ${extra_log_files}"
   if [[ -n "${use_custom_instance_list}" ]]; then
     if [[ -n "${LOG_DUMP_SAVE_LOGS:-}" ]]; then
-      files="${files} ${LOG_DUMP_SAVE_LOGS:-}"
+      save_files="${save_files} ${LOG_DUMP_SAVE_LOGS:-}"
     fi
   else
     case "${KUBERNETES_PROVIDER}" in
       gce|gke)
-        files="${files} ${gce_logfiles}"
+        save_files="${save_files} ${gce_logfiles}"
         ;;
       aws)
-        files="${files} ${aws_logfiles}"
+        save_files="${save_files} ${aws_logfiles}"
         ;;
     esac
   fi
-  local -r services=( ${systemd_services} ${opt_systemd_services} ${extra_systemd_services} )
+
+  read -r -a services <<< "${systemd_services} ${opt_systemd_services} ${extra_systemd_services}"

   if log-dump-ssh "${node_name}" "command -v journalctl" &> /dev/null; then
     if [[ "${on_master}" == "true" ]]; then
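
Note: two things happen in this hunk. The rename from files to save_files
most plausibly avoids clashing with the files array that copy-logs-from-node
now populates without a local declaration (ShellCheck also flags a variable
used as an array in one place and assigned a string in another, SC2178). And
read -r -a splits the space-separated service list into array elements
without the glob expansion an unquoted ( ${var} ) would perform:

    systemd_services="docker kubelet"
    read -r -a services <<< "${systemd_services} containerd"
    printf 'unit: %s\n' "${services[@]}"
    # unit: docker
    # unit: kubelet
    # unit: containerd

As with files above, the rewritten services loses its local -r declaration.
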
@@ -177,7 +179,7 @@ function save-logs() {
       log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise" > "${dir}/systemd.log" || true
     fi
   else
-    files="${kern_logfile} ${files} ${initd_logfiles} ${supervisord_logfiles}"
+    save_files="${kern_logfile} ${save_files} ${initd_logfiles} ${supervisord_logfiles}"
   fi

   # Try dumping coverage profiles, if it looks like coverage is enabled in the first place.
@@ -198,8 +200,8 @@ function save-logs() {
   echo "Changing logfiles to be world-readable for download"
   log-dump-ssh "${node_name}" "sudo chmod -R a+r /var/log" || true

-  echo "Copying '${files}' from ${node_name}"
-  copy-logs-from-node "${node_name}" "${dir}" "${files}"
+  echo "Copying '${save_files}' from ${node_name}"
+  copy-logs-from-node "${node_name}" "${dir}" "${save_files}"
 }

 # Saves a copy of the Windows Docker event log to ${WINDOWS_LOGS_DIR}\docker.log
@@ -245,8 +247,9 @@ function save-windows-logs-via-diagnostics-tool() {
   local node="${1}"
   local dest_dir="${2}"

-  gcloud compute instances add-metadata ${node} --metadata enable-diagnostics=true --project=${PROJECT} --zone=${ZONE}
-  local logs_archive_in_gcs=$(gcloud alpha compute diagnose export-logs ${node} --zone=${ZONE} --project=${PROJECT} | tail -n 1)
+  gcloud compute instances add-metadata "${node}" --metadata enable-diagnostics=true "--project=${PROJECT}" "--zone=${ZONE}"
+  local logs_archive_in_gcs
+  logs_archive_in_gcs=$(gcloud alpha compute diagnose export-logs "${node}" "--zone=${ZONE}" "--project=${PROJECT}" | tail -n 1)
   local temp_local_path="${node}.zip"
   for retry in {1..20}; do
     if gsutil mv "${logs_archive_in_gcs}" "${temp_local_path}" > /dev/null 2>&1; then
@@ -258,8 +261,8 @@ function save-windows-logs-via-diagnostics-tool() {
   done

   if [[ -f "${temp_local_path}" ]]; then
-    unzip ${temp_local_path} -d "${dest_dir}" > /dev/null
-    rm -f ${temp_local_path}
+    unzip "${temp_local_path}" -d "${dest_dir}" > /dev/null
+    rm -f "${temp_local_path}"
   fi
 }

@@ -272,14 +275,14 @@ function save-windows-logs-via-ssh() {
   export-windows-docker-images-list "${node}"

   local remote_files=()
-  for file in ${windows_node_logfiles[@]}; do
+  for file in "${windows_node_logfiles[@]}"; do
     remote_files+=("${WINDOWS_LOGS_DIR}\\${file}")
   done
   remote_files+=("${windows_node_otherfiles[@]}")

   # TODO(pjh, yujuhong): handle rotated logs and copying multiple files at the
   # same time.
-  for remote_file in ${remote_files[@]}; do
+  for remote_file in "${remote_files[@]}"; do
     # Retry up to 3 times to allow ssh keys to be properly propagated and
     # stored.
     for retry in {1..3}; do
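
Note: quoting array expansions as "${arr[@]}" (ShellCheck SC2068) keeps each
element as one word even when it contains spaces; unquoted ${arr[@]} re-splits
and glob-expands the elements:

    logs=("docker events.log" "kubelet.log")
    for f in ${logs[@]};   do echo "<$f>"; done  # <docker> <events.log> <kubelet.log>
    for f in "${logs[@]}"; do echo "<$f>"; done  # <docker events.log> <kubelet.log>
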
@@ -301,7 +304,7 @@ function save-logs-windows() {
   local -r node="${1}"
   local -r dest_dir="${2}"

-  if [[ ! "${gcloud_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
+  if [[ ! "${gcloud_supported_providers}" =~ ${KUBERNETES_PROVIDER} ]]; then
     echo "Not saving logs for ${node}, Windows log dumping requires gcloud support"
     return
   fi
@@ -323,14 +326,14 @@ function run-in-docker-container() {
   local node_name="$1"
   local container="$2"
   shift 2
-  log-dump-ssh "${node_name}" "docker exec \"\$(docker ps -f label=io.kubernetes.container.name=${container} --format \"{{.ID}}\")\" $@"
+  log-dump-ssh "${node_name}" "docker exec \"\$(docker ps -f label=io.kubernetes.container.name=${container} --format \"{{.ID}}\")\" $*"
 }

 function dump_masters() {
   local master_names
   if [[ -n "${use_custom_instance_list}" ]]; then
-    master_names=( $(log_dump_custom_get_instances master) )
-  elif [[ ! "${master_ssh_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
+    while IFS='' read -r line; do master_names+=("$line"); done < <(log_dump_custom_get_instances master)
+  elif [[ ! "${master_ssh_supported_providers}" =~ ${KUBERNETES_PROVIDER} ]]; then
     echo "Master SSH not supported for ${KUBERNETES_PROVIDER}"
     return
   elif [[ -n "${KUBEMARK_MASTER_NAME:-}" ]]; then
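
Note: inside a double-quoted string, "$*" joins the remaining arguments into
one space-separated string, which is what the remote docker exec command
needs; mixing "$@" into a string is what ShellCheck SC2145 flags. The
while/read rewrite also feeds the loop from process substitution (< <(cmd))
rather than a pipeline, so the += appends run in the current shell and the
array survives the loop:

    names=()
    while IFS='' read -r line; do names+=("$line"); done < <(printf 'a\nb\n')
    echo "${#names[@]}"   # 2 - a pipeline into while would have printed 0
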
@@ -381,8 +384,8 @@ function dump_nodes() {
     node_names=( "$@" )
   elif [[ -n "${use_custom_instance_list}" ]]; then
     echo "Dumping logs for nodes provided by log_dump_custom_get_instances() function"
-    node_names=( $(log_dump_custom_get_instances node) )
-  elif [[ ! "${node_ssh_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
+    while IFS='' read -r line; do node_names+=("$line"); done < <(log_dump_custom_get_instances node)
+  elif [[ ! "${node_ssh_supported_providers}" =~ ${KUBERNETES_PROVIDER} ]]; then
     echo "Node SSH not supported for ${KUBERNETES_PROVIDER}"
     return
   else
@@ -409,7 +412,7 @@ function dump_nodes() {
   linux_nodes_selected_for_logs=()
   if [[ -n "${LOGDUMP_ONLY_N_RANDOM_NODES:-}" ]]; then
     # We randomly choose 'LOGDUMP_ONLY_N_RANDOM_NODES' many nodes for fetching logs.
-    for index in `shuf -i 0-$(( ${#node_names[*]} - 1 )) -n ${LOGDUMP_ONLY_N_RANDOM_NODES}`
+    for index in $(shuf -i 0-$(( ${#node_names[*]} - 1 )) -n "${LOGDUMP_ONLY_N_RANDOM_NODES}")
     do
       linux_nodes_selected_for_logs+=("${node_names[$index]}")
     done
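
Note: $( ... ) replaces the deprecated backtick form (ShellCheck SC2006); it
nests without escaping and quoting inside behaves predictably:

    parent=$(basename "$(dirname "/tmp/a/b")")   # -> "a"; backticks would need \` to nest
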
@@ -475,7 +478,7 @@ function find_non_logexported_nodes() {
   echo "Successfully listed marker files for successful nodes"
   NON_LOGEXPORTED_NODES=()
   for node in "${NODE_NAMES[@]}"; do
-    if [[ ! "${succeeded_nodes}" =~ "${node}" ]]; then
+    if [[ ! "${succeeded_nodes}" =~ ${node} ]]; then
       NON_LOGEXPORTED_NODES+=("${node}")
     fi
   done
@@ -486,7 +489,8 @@ function find_non_logexported_nodes() {
 function dump_nodes_with_logexporter() {
   if [[ -n "${use_custom_instance_list}" ]]; then
     echo "Dumping logs for nodes provided by log_dump_custom_get_instances() function"
-    NODE_NAMES=( $(log_dump_custom_get_instances node) )
+    NODE_NAMES=()
+    while IFS='' read -r line; do NODE_NAMES+=("$line"); done < <(log_dump_custom_get_instances node)
   else
     echo "Detecting nodes in the cluster"
     detect-node-names &> /dev/null
@@ -498,7 +502,7 @@ function dump_nodes_with_logexporter() {
   fi

   # Obtain parameters required by logexporter.
-  local -r service_account_credentials="$(cat ${GOOGLE_APPLICATION_CREDENTIALS} | base64 | tr -d '\n')"
+  local -r service_account_credentials="$(base64 <<< "${GOOGLE_APPLICATION_CREDENTIALS}" | tr -d '\n')"
   local -r cloud_provider="${KUBERNETES_PROVIDER}"
   local -r enable_hollow_node_logs="${ENABLE_HOLLOW_NODE_LOGS:-false}"
   local -r logexport_sleep_seconds="$(( 90 + NUM_NODES / 3 ))"
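
Note: this particular rewrite changes behavior and seems worth flagging in
review. base64 <<< "${GOOGLE_APPLICATION_CREDENTIALS}" encodes the value of
the variable - the credentials file path plus a trailing newline - whereas
the old cat ${GOOGLE_APPLICATION_CREDENTIALS} | base64 encoded the file's
contents. If the goal was only to drop the useless use of cat (SC2002),
redirecting the file preserves the original behavior:

    local -r service_account_credentials="$(base64 < "${GOOGLE_APPLICATION_CREDENTIALS}" | tr -d '\n')"
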
@@ -559,7 +563,6 @@ function dump_nodes_with_logexporter() {
   done; wait)

   # List registry of marker files (of nodes whose logexporter succeeded) from GCS.
-  local nodes_succeeded
   for retry in {1..10}; do
     if find_non_logexported_nodes; then
       break
@@ -589,28 +592,29 @@ function dump_nodes_with_logexporter() {
   "${KUBECTL}" get pods --namespace "${logexporter_namespace}" || true
   "${KUBECTL}" delete namespace "${logexporter_namespace}" || true
   if [[ "${#failed_nodes[@]}" != 0 ]]; then
-    echo -e "Dumping logs through SSH for the following nodes:\n${failed_nodes[@]}"
+    echo -e "Dumping logs through SSH for the following nodes:\n${failed_nodes[*]}"
     dump_nodes "${failed_nodes[@]}"
   fi
 }

 function detect_node_failures() {
-  if ! [[ "${gcloud_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
+  if ! [[ "${gcloud_supported_providers}" =~ ${KUBERNETES_PROVIDER} ]]; then
     return
   fi

   detect-node-names
   if [[ "${KUBERNETES_PROVIDER}" == "gce" ]]; then
-    local all_instance_groups=(${INSTANCE_GROUPS[@]} ${WINDOWS_INSTANCE_GROUPS[@]})
+    local all_instance_groups=("${INSTANCE_GROUPS[@]}" "${WINDOWS_INSTANCE_GROUPS[@]}")
   else
-    local all_instance_groups=(${INSTANCE_GROUPS[@]})
+    local all_instance_groups=("${INSTANCE_GROUPS[@]}")
   fi

   if [ -z "${all_instance_groups:-}" ]; then
     return
   fi
   for group in "${all_instance_groups[@]}"; do
-    local creation_timestamp=$(gcloud compute instance-groups managed describe \
+    local creation_timestamp
+    creation_timestamp=$(gcloud compute instance-groups managed describe \
       "${group}" \
       --project "${PROJECT}" \
       --zone "${ZONE}" \
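
Note: ${failed_nodes[*]} joins the elements with spaces into a single word,
which is the right expansion inside a quoted echo string (using [@] there is
what triggers ShellCheck SC2145). The dump_nodes "${failed_nodes[@]}" call on
the next line keeps [@] because each node must remain a separate argument:

    failed_nodes=(node-1 node-2)
    echo -e "failed:\n${failed_nodes[*]}"   # one string: "node-1 node-2"
    dump_nodes "${failed_nodes[@]}"         # two separate arguments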