
Commit c71a25e

gavinfish and php-coder authored
Fix shellcheck warnings/errors in cluster/gce/upgrade.sh (kubernetes#88576)
* Fix shellcheck warnings/errors in cluster/gce/upgrade.sh

* Update cluster/gce/upgrade.sh

Co-authored-by: Slava Semushin <[email protected]>
1 parent f372c54 · commit c71a25e

File tree: 2 files changed (+50 −38 lines)

cluster/gce/upgrade.sh

Lines changed: 50 additions & 37 deletions
@@ -165,7 +165,7 @@ function prepare-upgrade() {
 # ZONE
 function get-node-env() {
   # TODO(zmerlynn): Make this more reliable with retries.
-  gcloud compute --project ${PROJECT} ssh --zone ${ZONE} ${NODE_NAMES[0]} --command \
+  gcloud compute --project "${PROJECT}" ssh --zone "${ZONE}" "${NODE_NAMES[0]}" --command \
     "curl --fail --silent -H 'Metadata-Flavor: Google' \
     'http://metadata/computeMetadata/v1/instance/attributes/kube-env'" 2>/dev/null
 }
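The quoting changes throughout this commit address shellcheck SC2086: unquoted expansions such as ${PROJECT} undergo word splitting and globbing before gcloud ever sees them. A minimal sketch of the failure mode, using a made-up value rather than anything from this script:

    # Hypothetical illustration of SC2086; not part of the commit.
    name="node a"                     # value containing a space
    printf '<%s>\n' ${name}           # unquoted: two arguments -> <node> <a>
    printf '<%s>\n' "${name}"         # quoted: one argument    -> <node a>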
@@ -241,24 +241,31 @@ function prepare-node-upgrade() {
   echo "== Preparing node upgrade (to ${KUBE_VERSION}). ==" >&2
   setup-base-image

-  SANITIZED_VERSION=$(echo ${KUBE_VERSION} | sed 's/[\.\+]/-/g')
+  SANITIZED_VERSION="${KUBE_VERSION//[\.\+]/-}"

   # TODO(zmerlynn): Refactor setting scope flags.
   local scope_flags=
   if [ -n "${NODE_SCOPES}" ]; then
     scope_flags="--scopes ${NODE_SCOPES}"
   else
+    # shellcheck disable=SC2034 # 'scope_flags' is used by upstream
     scope_flags="--no-scopes"
   fi

   # Get required node env vars from exiting template.
-  local node_env=$(get-node-env)
+  local node_env
+  node_env=$(get-node-env)
   KUBE_PROXY_TOKEN=$(get-env-val "${node_env}" "KUBE_PROXY_TOKEN")
+  export KUBE_PROXY_TOKEN
   NODE_PROBLEM_DETECTOR_TOKEN=$(get-env-val "${node_env}" "NODE_PROBLEM_DETECTOR_TOKEN")
   CA_CERT_BASE64=$(get-env-val "${node_env}" "CA_CERT")
+  export CA_CERT_BASE64
   EXTRA_DOCKER_OPTS=$(get-env-val "${node_env}" "EXTRA_DOCKER_OPTS")
+  export EXTRA_DOCKER_OPTS
   KUBELET_CERT_BASE64=$(get-env-val "${node_env}" "KUBELET_CERT")
+  export KUBELET_CERT_BASE64
   KUBELET_KEY_BASE64=$(get-env-val "${node_env}" "KUBELET_KEY")
+  export KUBELET_KEY_BASE64

   upgrade-node-env

@@ -268,7 +275,8 @@ function prepare-node-upgrade() {

   # TODO(zmerlynn): Get configure-vm script from ${version}. (Must plumb this
   # through all create-linux-node-instance-template implementations).
-  local template_name=$(get-template-name-from-version ${SANITIZED_VERSION} ${NODE_INSTANCE_PREFIX})
+  local template_name
+  template_name=$(get-template-name-from-version "${SANITIZED_VERSION}" "${NODE_INSTANCE_PREFIX}")
   create-linux-node-instance-template "${template_name}"
   # The following is echo'd so that callers can get the template name.
   echo "Instance template name: ${template_name}"
@@ -281,7 +289,8 @@ function upgrade-node-env() {
   # the original node.
   if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" && "${NODE_PROBLEM_DETECTOR_TOKEN:-}" == "" ]]; then
     detect-master
-    local master_env=$(get-master-env)
+    local master_env
+    master_env=$(get-master-env)
     NODE_PROBLEM_DETECTOR_TOKEN=$(get-env-val "${master_env}" "NODE_PROBLEM_DETECTOR_TOKEN")
   fi
 }
@@ -293,7 +302,8 @@ function upgrade-node-env() {
 function do-single-node-upgrade() {
   local -r instance="$1"
   local kubectl_rc
-  local boot_id=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output=jsonpath='{.status.nodeInfo.bootID}' 2>&1) && kubectl_rc=$? || kubectl_rc=$?
+  local boot_id
+  boot_id=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output=jsonpath='{.status.nodeInfo.bootID}' 2>&1) && kubectl_rc=$? || kubectl_rc=$?
   if [[ "${kubectl_rc}" != 0 ]]; then
     echo "== FAILED to get bootID ${instance} =="
     echo "${boot_id}"
@@ -313,7 +323,8 @@ function do-single-node-upgrade() {
   # Recreate instance
   echo "== Recreating instance ${instance}. ==" >&2
   local recreate_rc
-  local recreate=$(gcloud compute instance-groups managed recreate-instances "${group}" \
+  local recreate
+  recreate=$(gcloud compute instance-groups managed recreate-instances "${group}" \
     --project="${PROJECT}" \
     --zone="${ZONE}" \
     --instances="${instance}" 2>&1) && recreate_rc=$? || recreate_rc=$?
@@ -329,7 +340,8 @@ function do-single-node-upgrade() {
   # it is a best effort approximation.
   echo "== Waiting for new node to be added to k8s. ==" >&2
   while true; do
-    local new_boot_id=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output=jsonpath='{.status.nodeInfo.bootID}' 2>&1) && kubectl_rc=$? || kubectl_rc=$?
+    local new_boot_id
+    new_boot_id=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output=jsonpath='{.status.nodeInfo.bootID}' 2>&1) && kubectl_rc=$? || kubectl_rc=$?
     if [[ "${kubectl_rc}" != 0 ]]; then
       echo "== FAILED to get node ${instance} =="
       echo "${boot_id}"
@@ -346,7 +358,8 @@ function do-single-node-upgrade() {
   # Wait for the node to have Ready=True.
   echo "== Waiting for ${instance} to become ready. ==" >&2
   while true; do
-    local ready=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output='jsonpath={.status.conditions[?(@.type == "Ready")].status}')
+    local ready
+    ready=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output='jsonpath={.status.conditions[?(@.type == "Ready")].status}')
     if [[ "${ready}" != 'True' ]]; then
       echo "Node ${instance} is still not ready: Ready=${ready}"
     else
@@ -374,14 +387,14 @@ function do-node-upgrade() {
   # Do the actual upgrade.
   # NOTE(zmerlynn): If you are changing this gcloud command, update
   # test/e2e/cluster_upgrade.go to match this EXACTLY.
-  local template_name=$(get-template-name-from-version ${SANITIZED_VERSION} ${NODE_INSTANCE_PREFIX})
+  local template_name
+  template_name=$(get-template-name-from-version "${SANITIZED_VERSION}" "${NODE_INSTANCE_PREFIX}")
   local old_templates=()
-  local updates=()
-  for group in ${INSTANCE_GROUPS[@]}; do
-    old_templates+=($(gcloud compute instance-groups managed list \
+  for group in "${INSTANCE_GROUPS[@]}"; do
+    while IFS='' read -r line; do old_templates+=("$line"); done < <(gcloud compute instance-groups managed list \
       --project="${PROJECT}" \
       --filter="name ~ '${group}' AND zone:(${ZONE})" \
-      --format='value(instanceTemplate)' || true))
+      --format='value(instanceTemplate)' || true)
     set_instance_template_out=$(gcloud compute instance-groups managed set-instance-template "${group}" \
       --template="${template_name}" \
       --project="${PROJECT}" \
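The unused `local updates=()` declaration is dropped (shellcheck SC2034), and `old_templates+=($(...))` is replaced with a while-read loop because appending an unquoted command substitution to an array relies on word splitting and globbing (shellcheck SC2207). A minimal standalone sketch of the replacement pattern, using a dummy command in place of gcloud:

    # Hypothetical illustration of the while-read pattern; not part of the commit.
    templates=()
    while IFS='' read -r line; do
      templates+=("$line")                 # one output line per array element
    done < <(printf '%s\n' 'template one' 'template two')
    echo "${#templates[@]}"                # 2 - embedded spaces no longer split entries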
@@ -392,20 +405,20 @@ function do-node-upgrade() {
       return ${set_instance_template_rc}
     fi
     instances=()
-    instances+=($(gcloud compute instance-groups managed list-instances "${group}" \
+    while IFS='' read -r line; do instances+=("$line"); done < <(gcloud compute instance-groups managed list-instances "${group}" \
       --format='value(instance)' \
       --project="${PROJECT}" \
-      --zone="${ZONE}" 2>&1)) && list_instances_rc=$? || list_instances_rc=$?
+      --zone="${ZONE}" 2>&1) && list_instances_rc=$? || list_instances_rc=$?
     if [[ "${list_instances_rc}" != 0 ]]; then
       echo "== FAILED to list instances in group ${group} =="
-      echo "${instances}"
+      echo "${instances[@]}"
       return ${list_instances_rc}
     fi

     process_count_left=${node_upgrade_parallelism}
     pids=()
     ret_code_sum=0 # Should stay 0 in the loop iff all parallel node upgrades succeed.
-    for instance in ${instances[@]}; do
+    for instance in "${instances[@]}"; do
       do-single-node-upgrade "${instance}" & pids+=("$!")

       # We don't want to run more than ${node_upgrade_parallelism} upgrades at a time,
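The change from `echo "${instances}"` to `echo "${instances[@]}"` addresses shellcheck SC2128: expanding an array variable without an index yields only its first element, so the old error message could silently drop most of the output. A tiny sketch:

    # Hypothetical illustration of SC2128; not part of the commit.
    instances=(node-a node-b node-c)
    echo "${instances}"       # prints only: node-a
    echo "${instances[@]}"    # prints: node-a node-b node-c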
@@ -415,7 +428,7 @@ function do-node-upgrade() {
       if [[ process_count_left -eq 0 || "${instance}" == "${instances[-1]}" ]]; then
         # Wait for each of the parallel node upgrades to finish.
         for pid in "${pids[@]}"; do
-          wait $pid
+          wait "$pid"
           ret_code_sum=$(( ret_code_sum + $? ))
         done
         # Return even if at least one of the node upgrades failed.
@@ -430,7 +443,7 @@ function do-node-upgrade() {

   # Remove the old templates.
   echo "== Deleting old templates in ${PROJECT}. ==" >&2
-  for tmpl in ${old_templates[@]}; do
+  for tmpl in "${old_templates[@]}"; do
     gcloud compute instance-templates delete \
       --quiet \
       --project="${PROJECT}" \
@@ -455,11 +468,11 @@ function update-coredns-config() {

   # Get the new installed CoreDNS version
   echo "Waiting for CoreDNS to update"
-  until [[ $(${KUBE_ROOT}/cluster/kubectl.sh -n kube-system get deployment coredns -o=jsonpath='{$.metadata.resourceVersion}') -ne ${COREDNS_DEPLOY_RESOURCE_VERSION} ]]; do
+  until [[ $("${KUBE_ROOT}"/cluster/kubectl.sh -n kube-system get deployment coredns -o=jsonpath='{$.metadata.resourceVersion}') -ne ${COREDNS_DEPLOY_RESOURCE_VERSION} ]]; do
     sleep 1
   done
   echo "Fetching the latest installed CoreDNS version"
-  NEW_COREDNS_VERSION=$(${KUBE_ROOT}/cluster/kubectl.sh -n kube-system get deployment coredns -o=jsonpath='{$.spec.template.spec.containers[:1].image}' | cut -d ":" -f 2)
+  NEW_COREDNS_VERSION=$("${KUBE_ROOT}"/cluster/kubectl.sh -n kube-system get deployment coredns -o=jsonpath='{$.spec.template.spec.containers[:1].image}' | cut -d ":" -f 2)

   case "$(uname -m)" in
     x86_64*)
@@ -502,37 +515,37 @@ function update-coredns-config() {

   # Download the CoreDNS migration tool
   echo "== Downloading the CoreDNS migration tool =="
-  wget -P ${download_dir} "https://github.com/coredns/corefile-migration/releases/download/v1.0.6/corefile-tool-${host_arch}" >/dev/null 2>&1
+  wget -P "${download_dir}" "https://github.com/coredns/corefile-migration/releases/download/v1.0.6/corefile-tool-${host_arch}" >/dev/null 2>&1

-  local -r checkSHA=$(sha256sum ${download_dir}/corefile-tool-${host_arch} | cut -d " " -f 1)
+  local -r checkSHA=$(sha256sum "${download_dir}/corefile-tool-${host_arch}" | cut -d " " -f 1)
   if [[ "${checkSHA}" != "${corefile_tool_SHA}" ]]; then
     echo "!!! CheckSum for the CoreDNS migration tool did not match !!!" >&2
     exit 1
   fi

-  chmod +x ${download_dir}/corefile-tool-${host_arch}
+  chmod +x "${download_dir}/corefile-tool-${host_arch}"

   # Migrate the CoreDNS ConfigMap depending on whether it is being downgraded or upgraded.
-  ${KUBE_ROOT}/cluster/kubectl.sh -n kube-system get cm coredns -o jsonpath='{.data.Corefile}' > ${download_dir}/Corefile-old
+  "${KUBE_ROOT}/cluster/kubectl.sh" -n kube-system get cm coredns -o jsonpath='{.data.Corefile}' > "${download_dir}/Corefile-old"

-  if test "$(printf '%s\n' ${CURRENT_COREDNS_VERSION} ${NEW_COREDNS_VERSION} | sort -V | head -n 1)" != ${NEW_COREDNS_VERSION}; then
+  if test "$(printf '%s\n' "${CURRENT_COREDNS_VERSION}" "${NEW_COREDNS_VERSION}" | sort -V | head -n 1)" != "${NEW_COREDNS_VERSION}"; then
     echo "== Upgrading the CoreDNS ConfigMap =="
-    ${download_dir}/corefile-tool-${host_arch} migrate --from ${CURRENT_COREDNS_VERSION} --to ${NEW_COREDNS_VERSION} --corefile ${download_dir}/Corefile-old > ${download_dir}/Corefile
-    ${KUBE_ROOT}/cluster/kubectl.sh -n kube-system create configmap coredns --from-file ${download_dir}/Corefile -o yaml --dry-run=client | ${KUBE_ROOT}/cluster/kubectl.sh apply -f -
+    "${download_dir}/corefile-tool-${host_arch}" migrate --from "${CURRENT_COREDNS_VERSION}" --to "${NEW_COREDNS_VERSION}" --corefile "${download_dir}/Corefile-old" > "${download_dir}/Corefile"
+    "${KUBE_ROOT}/cluster/kubectl.sh" -n kube-system create configmap coredns --from-file "${download_dir}/Corefile" -o yaml --dry-run=client | "${KUBE_ROOT}/cluster/kubectl.sh" apply -f -
   else
     # In case of a downgrade, a custom CoreDNS Corefile will be overwritten by a default Corefile. In that case,
     # the user will need to manually modify the resulting (default) Corefile after the downgrade is complete.
     echo "== Applying the latest default CoreDNS configuration =="
-    gcloud compute --project ${PROJECT} scp --zone ${ZONE} ${MASTER_NAME}:${coredns_addon_path}/coredns.yaml ${download_dir}/coredns-manifest.yaml > /dev/null
-    ${KUBE_ROOT}/cluster/kubectl.sh apply -f ${download_dir}/coredns-manifest.yaml
+    gcloud compute --project "${PROJECT}" scp --zone "${ZONE}" "${MASTER_NAME}:${coredns_addon_path}/coredns.yaml" "${download_dir}/coredns-manifest.yaml" > /dev/null
+    "${KUBE_ROOT}/cluster/kubectl.sh" apply -f "${download_dir}/coredns-manifest.yaml"
   fi

   echo "== The CoreDNS Config has been updated =="
 }

 echo "Fetching the previously installed CoreDNS version"
-CURRENT_COREDNS_VERSION=$(${KUBE_ROOT}/cluster/kubectl.sh -n kube-system get deployment coredns -o=jsonpath='{$.spec.template.spec.containers[:1].image}' | cut -d ":" -f 2)
-COREDNS_DEPLOY_RESOURCE_VERSION=$(${KUBE_ROOT}/cluster/kubectl.sh -n kube-system get deployment coredns -o=jsonpath='{$.metadata.resourceVersion}')
+CURRENT_COREDNS_VERSION=$("${KUBE_ROOT}/cluster/kubectl.sh" -n kube-system get deployment coredns -o=jsonpath='{$.spec.template.spec.containers[:1].image}' | cut -d ":" -f 2)
+COREDNS_DEPLOY_RESOURCE_VERSION=$("${KUBE_ROOT}/cluster/kubectl.sh" -n kube-system get deployment coredns -o=jsonpath='{$.metadata.resourceVersion}')

 master_upgrade=true
 node_upgrade=true
@@ -542,7 +555,7 @@ env_os_distro=false
 node_upgrade_parallelism=1

 while getopts ":MNPlcho" opt; do
-  case ${opt} in
+  case "${opt}" in
     M)
       node_upgrade=false
       ;;
@@ -603,7 +616,7 @@ if [[ -z "${STORAGE_MEDIA_TYPE:-}" ]] && [[ "${STORAGE_BACKEND:-}" != "etcd2" ]]
   echo "export STORAGE_MEDIA_TYPE=application/json"
   echo ""
   if [ -t 0 ] && [ -t 1 ]; then
-    read -p "Would you like to continue with the new default, and lose the ability to downgrade to etcd2? [y/N] " confirm
+    read -r -p "Would you like to continue with the new default, and lose the ability to downgrade to etcd2? [y/N] " confirm
     if [[ "${confirm}" != "y" ]]; then
       exit 1
     fi
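Both interactive prompts gain the -r flag, which addresses shellcheck SC2162: without it, read treats a backslash in the typed input as an escape character and silently alters the value. A tiny sketch:

    # Hypothetical illustration of read -r; not part of the commit.
    read -r answer <<< 'a\b'; printf '%s\n' "$answer"   # a\b (backslash preserved)
    read    answer <<< 'a\b'; printf '%s\n' "$answer"   # ab  (backslash consumed)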
@@ -638,7 +651,7 @@ if [[ "${master_upgrade}" == "true" ]]; then
   echo "In all cases, it is strongly recommended to have an etcd backup before upgrading."
   echo
   if [ -t 0 ] && [ -t 1 ]; then
-    read -p "Continue with default etcd version, which might upgrade etcd? [y/N] " confirm
+    read -r -p "Continue with default etcd version, which might upgrade etcd? [y/N] " confirm
     if [[ "${confirm}" != "y" ]]; then
       exit 1
     fi
@@ -652,7 +665,7 @@ fi
 print-node-version-info "Pre-Upgrade"

 if [[ "${local_binaries}" == "false" ]]; then
-  set_binary_version ${1}
+  set_binary_version "${1}"
 fi

 prepare-upgrade

hack/.shellcheck_failures

Lines changed: 0 additions & 1 deletion
@@ -1,6 +1,5 @@
 ./cluster/gce/gci/configure-helper.sh
 ./cluster/gce/gci/configure.sh
 ./cluster/gce/gci/master-helper.sh
-./cluster/gce/upgrade.sh
 ./cluster/gce/util.sh
 ./cluster/log-dump/log-dump.sh
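hack/.shellcheck_failures lists the scripts still exempt from the repository's shellcheck gate; removing ./cluster/gce/upgrade.sh means the verifier now requires a clean run for this file. Assuming shellcheck is installed locally, the result can be reproduced from the repository root (a sketch, not a command taken from the commit):

    # Check the single script directly...
    shellcheck ./cluster/gce/upgrade.sh
    # ...or run the repo's verifier, which also consults the exemption list.
    ./hack/verify-shellcheck.sh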
