Skip to content

Commit bcc6b22

Browse files
authored
Merge pull request kubernetes#85913 from jkaniuk/log-dump-fix
Fix waiting for logexporter log fetching processes
2 parents 2f58d2e + 2dc3684 commit bcc6b22

File tree

1 file changed

+4
-7
lines changed


cluster/log-dump/log-dump.sh

Lines changed: 4 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -504,10 +504,10 @@ function dump_nodes_with_logexporter() {
504504
# Store logs from logexporter pods to allow debugging log exporting process
505505
# itself.
506506
proc=${max_dump_processes}
507-
"${KUBECTL}" get pods -n "${logexporter_namespace}" -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.nodeName}{"\n"}{end}' | while read pod node; do
507+
"${KUBECTL}" get pods -n "${logexporter_namespace}" -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.nodeName}{"\n"}{end}' | (while read -r pod node; do
508508
echo "Fetching logs from ${pod} running on ${node}"
509-
mkdir -p ${report_dir}/${node}
510-
"${KUBECTL}" logs -n "${logexporter_namespace}" ${pod} > ${report_dir}/${node}/${pod}.log &
509+
mkdir -p "${report_dir}/${node}"
510+
"${KUBECTL}" logs -n "${logexporter_namespace}" "${pod}" > "${report_dir}/${node}/${pod}.log" &
511511

512512
# We don't want to run more than ${max_dump_processes} at a time, so
513513
# wait once we hit that many nodes. This isn't ideal, since one might
@@ -517,11 +517,8 @@ function dump_nodes_with_logexporter() {
517517
proc=${max_dump_processes}
518518
wait
519519
fi
520-
done
521520
# Wait for any remaining processes.
522-
if [[ proc -gt 0 && proc -lt ${max_dump_processes} ]]; then
523-
wait
524-
fi
521+
done; wait)
525522

526523
# List registry of marker files (of nodes whose logexporter succeeded) from GCS.
527524
local nodes_succeeded

0 commit comments

Comments (0)