diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 441b0a4391..44b34a13df 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,8 +1,8 @@ blank_issues_enabled: false contact_links: - name: Create Release - about: Please create the release issue in the internal Issue Tracker repository. - _Only accessible by maintainers._ + about: Please create the release issue in the internal Issue Tracker repository. - Only accessible by maintainers. url: https://github.com/elastisys/ck8s-issue-tracker/issues/new?template=release-apps.md - name: Create Patch - about: Please create the patch issue in the internal Issue Tracker repository. - _Only accessible by maintainers._ + about: Please create the patch issue in the internal Issue Tracker repository. - Only accessible by maintainers. url: https://github.com/elastisys/ck8s-issue-tracker/issues/new?template=patch-apps.md diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d3932e30e7..9849ef1b6c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,6 +4,7 @@ repos: hooks: - id: check-executables-have-shebangs - id: check-merge-conflict + exclude: ^helmfile.d/upstream/opensearch-project/opensearch/CHANGELOG.md$ - id: check-json exclude: ^helmfile\.d/upstream/ - id: check-toml @@ -22,16 +23,52 @@ repos: - id: trailing-whitespace exclude: ^helmfile\.d/upstream/|^tests/.*/.*\.out$ - - repo: https://github.com/koalaman/shellcheck-precommit - rev: v0.10.0 + - repo: local hooks: + - id: generate-cypress-tests + name: generate cypress tests + language: script + require_serial: true + files: '.*\.cy.js' + entry: tests/common/gen.bash + args: + - cypress + + - id: generate-template-tests + name: generate template tests + language: script + require_serial: true + files: '.*\.bats.gotmpl' + entry: tests/common/gen.bash + args: + - template + - id: shellcheck name: check scripts - exclude: 
^helmfile\.d/charts/opensearch/configurer/files/|^helmfile\.d/upstream/ + types: [shell] + exclude_types: [csh, tcsh, zsh] + exclude: ^helmfile\.d/charts/opensearch/configurer/files/|^helmfile\.d/upstream/|^images/elasticsearch-curator/ + language: script + require_serial: true + entry: scripts/run-from-container.sh + args: + - --network=none + - docker.io/koalaman/shellcheck:v0.10.0@sha256:2097951f02e735b613f4a34de20c40f937a6c8f18ecb170612c88c34517221fb + - --color=always + - --external-sources + + - id: shfmt + name: format scripts + types: [shell] + exclude_types: [csh, tcsh, zsh] + exclude: ^helmfile\.d/charts/opensearch/configurer/files/|^helmfile\.d/upstream/|^images/elasticsearch-curator/ + language: script require_serial: true + entry: scripts/run-from-container.sh args: - - --color=always - - --external-sources + - --network=none + - docker.io/mvdan/shfmt:v3.10.0@sha256:d19cc37644449fe9a488f234d2c0cf0b770eaf6a5a40e30103e8099013ef8f9e + - -w - repo: https://github.com/codespell-project/codespell rev: v2.3.0 @@ -51,23 +88,3 @@ repos: exclude: ^changelog/|^docs/sbom.md$|^helmfile\.d/charts/opensearch/configurer/files/|^helmfile\.d/upstream/|^images/elasticsearch-curator/ args: - --fix - - - repo: local - hooks: - - id: generate-cypress-tests - name: generate cypress tests - language: script - require_serial: true - files: '.*\.cy.js' - entry: tests/common/gen.bash - args: - - cypress - - - id: generate-template-tests - name: generate template tests - language: script - require_serial: true - files: '.*\.bats.gotmpl' - entry: tests/common/gen.bash - args: - - template diff --git a/bin/apply.bash b/bin/apply.bash index 9a45336446..5e6b097d9b 100755 --- a/bin/apply.bash +++ b/bin/apply.bash @@ -61,11 +61,14 @@ declare environment case "${1:-}" in sc) - environment="service_cluster" ;; + environment="service_cluster" + ;; wc) - environment="workload_cluster" ;; + environment="workload_cluster" + ;; *) - usage "${1:-}" ;; + usage "${1:-}" + ;; esac 
update_ips_dryrun "$1" "${environment}" diff --git a/bin/ck8s b/bin/ck8s index 174ad82e91..146fe01c3b 100755 --- a/bin/ck8s +++ b/bin/ck8s @@ -9,34 +9,34 @@ here="$(dirname "$(readlink -f "$0")")" source "${here}/common.bash" usage() { - echo "COMMANDS:" 1>&2 - echo " apply [--sync] [--concurrency=] deploy the apps" 1>&2 - echo " clean Cleans the cluster of apps" 1>&2 - echo " completion bash output shell completion code for bash" 1>&2 - echo " diagnostics [--help] Runs diagnostics of apps" 1>&2 - echo " dry-run [--kubectl] runs helmfile diff" 1>&2 - echo " explain [key.to.parameter] explains the config or secrets" 1>&2 - echo " fix-psp-violations Checks and restarts pods that violates Pod Security Polices, applicable for new environments" 1>&2 - echo " flavors lists supported configuration flavors" 1>&2 - echo " init [--generate-new-secrets] initialize the config path" 1>&2 - echo " install-requirements [--user] [--no-pass] installs or updates required tools to run compliantkubernetes-apps" 1>&2 - echo " k8s-installers lists supported kubernetes installers" 1>&2 - echo " kubeconfig generate user kubeconfig, stored at CK8S_CONFIG_PATH/user" 1>&2 - echo " ops helm run helm as cluster admin" 1>&2 - echo " ops helmfile run helmfile as cluster admin" 1>&2 - echo " ops kubecolor run kubecolor as cluster admin" 1>&2 - echo " ops kubectl run kubectl as cluster admin" 1>&2 - echo " ops velero run velero as cluster admin" 1>&2 - echo " providers lists supported cloud providers" 1>&2 - echo " s3cmd [cmd] run s3cmd" 1>&2 - echo " team add-pgp add a new PGP key to secrets" 1>&2 - echo " team remove-pgp remove a PGP key from secrets and rotate the data encryption key" 1>&2 - echo " test [--logging-enabled] test the applications" 1>&2 - echo " update-ips Automatically fetches and applies the IPs for network policies" 1>&2 - echo " upgrade apply runs all apply steps upgrading the environment" 1>&2 - echo " upgrade prepare runs all prepare steps upgrading the configuration" 1>&2 - 
echo " validate validates config files" 1>&2 - exit 1 + echo "COMMANDS:" 1>&2 + echo " apply [--sync] [--concurrency=] deploy the apps" 1>&2 + echo " clean Cleans the cluster of apps" 1>&2 + echo " completion bash output shell completion code for bash" 1>&2 + echo " diagnostics [--help] Runs diagnostics of apps" 1>&2 + echo " dry-run [--kubectl] runs helmfile diff" 1>&2 + echo " explain [key.to.parameter] explains the config or secrets" 1>&2 + echo " fix-psp-violations Checks and restarts pods that violates Pod Security Polices, applicable for new environments" 1>&2 + echo " flavors lists supported configuration flavors" 1>&2 + echo " init [--generate-new-secrets] initialize the config path" 1>&2 + echo " install-requirements [--user] [--no-pass] installs or updates required tools to run compliantkubernetes-apps" 1>&2 + echo " k8s-installers lists supported kubernetes installers" 1>&2 + echo " kubeconfig generate user kubeconfig, stored at CK8S_CONFIG_PATH/user" 1>&2 + echo " ops helm run helm as cluster admin" 1>&2 + echo " ops helmfile run helmfile as cluster admin" 1>&2 + echo " ops kubecolor run kubecolor as cluster admin" 1>&2 + echo " ops kubectl run kubectl as cluster admin" 1>&2 + echo " ops velero run velero as cluster admin" 1>&2 + echo " providers lists supported cloud providers" 1>&2 + echo " s3cmd [cmd] run s3cmd" 1>&2 + echo " team add-pgp add a new PGP key to secrets" 1>&2 + echo " team remove-pgp remove a PGP key from secrets and rotate the data encryption key" 1>&2 + echo " test [--logging-enabled] test the applications" 1>&2 + echo " update-ips Automatically fetches and applies the IPs for network policies" 1>&2 + echo " upgrade apply runs all apply steps upgrading the environment" 1>&2 + echo " upgrade prepare runs all prepare steps upgrading the configuration" 1>&2 + echo " validate validates config files" 1>&2 + exit 1 } SYNC="" @@ -44,133 +44,112 @@ KUBECTL="" GEN_NEW_SECRETS="" CONCURRENCY="--concurrency=8" -for arg in "$@"; do - case "$arg" 
in - "--sync") SYNC="sync" ;; - "--kubectl") KUBECTL="kubectl" ;; - "--generate-new-secrets") GEN_NEW_SECRETS="--generate-new-secrets" ;; - "--concurrency="*) CONCURRENCY="$arg" ;; +for arg in "${@}"; do + case "${arg}" in + "--sync") SYNC="sync" ;; + "--kubectl") KUBECTL="kubectl" ;; + "--generate-new-secrets") GEN_NEW_SECRETS="--generate-new-secrets" ;; + "--concurrency="*) CONCURRENCY="${arg}" ;; esac done case "${1}" in - init) - [[ "${2}" =~ ^(wc|sc|both)$ ]] || usage - check_tools - export CK8S_CLUSTER="${2}" - "${here}/init.bash" "${GEN_NEW_SECRETS}" - ;; - apply) - [[ "${2}" =~ ^(wc|sc)$ ]] || usage - check_tools - "${here}/apply.bash" "${2}" "${SYNC}" "${CONCURRENCY}" - ;; - test) - [[ "${2}" =~ ^(wc|sc)$ ]] || usage - check_tools - with_kubeconfig "${config["kube_config_${2}"]}" "${here}/test.bash" "${@:2}" - ;; - dry-run) - [[ "${2}" =~ ^(wc|sc)$ ]] || usage - check_tools - "${here}/dry-run.bash" "${2}" "${KUBECTL}" - ;; - upgrade) - [[ "${2}" =~ ^(wc|sc|both)$ ]] || usage - [[ "${3}" =~ ^(v[0-9]+\.[0-9]+)$ ]] || usage - [[ "${4}" =~ ^(prepare|apply)$ ]] || usage - check_tools - export CK8S_CLUSTER="${2}" - "${here}/upgrade.bash" "${3}" "${4}" - ;; - team) - case "${2}" in - add-pgp|remove-pgp) - [ -n "${3}" ] || usage - "${here}/team.bash" "${2}" "${3}" - ;; - *) usage ;; - esac - ;; - ops) - case "${2}" in - kubectl) - [[ "${3}" =~ ^(wc|sc)$ ]] || usage - shift 2 - "${here}/ops.bash" kubectl "${@}" - ;; - kubecolor) - [[ "${3}" =~ ^(wc|sc)$ ]] || usage - shift 2 - "${here}/ops.bash" kubecolor "${@}" - ;; - helm) - [[ "${3}" =~ ^(wc|sc)$ ]] || usage - shift 2 - "${here}/ops.bash" helm "${@}" - ;; - helmfile) - [[ "${3}" =~ ^(wc|sc)$ ]] || usage - shift 2 - "${here}/ops.bash" helmfile "${@}" - ;; - velero) - [[ "${3}" =~ ^(wc|sc)$ ]] || usage - shift 2 - "${here}/ops.bash" velero "${@}" - ;; - *) usage ;; - esac +init) + [[ "${2}" =~ ^(wc|sc|both)$ ]] || usage + check_tools + export CK8S_CLUSTER="${2}" + "${here}/init.bash" "${GEN_NEW_SECRETS}" + ;; 
+apply) + [[ "${2}" =~ ^(wc|sc)$ ]] || usage + check_tools + "${here}/apply.bash" "${2}" "${SYNC}" "${CONCURRENCY}" + ;; +test) + [[ "${2}" =~ ^(wc|sc)$ ]] || usage + check_tools + with_kubeconfig "${config["kube_config_${2}"]}" "${here}/test.bash" "${@:2}" + ;; +dry-run) + [[ "${2}" =~ ^(wc|sc)$ ]] || usage + check_tools + "${here}/dry-run.bash" "${2}" "${KUBECTL}" + ;; +upgrade) + [[ "${2}" =~ ^(wc|sc|both)$ ]] || usage + [[ "${3}" =~ ^(v[0-9]+\.[0-9]+)$ ]] || usage + [[ "${4}" =~ ^(prepare|apply)$ ]] || usage + check_tools + export CK8S_CLUSTER="${2}" + "${here}/upgrade.bash" "${3}" "${4}" + ;; +team) + case "${2}" in + add-pgp | remove-pgp) + [ -n "${3}" ] || usage + "${here}/team.bash" "${2}" "${3}" ;; - s3cmd) - shift - sops_exec_file "${secrets[s3cfg_file]}" 's3cmd --config="{}" '"${*}" + *) usage ;; + esac + ;; +ops) + case "${2}" in + kubectl | kubecolor | helm | helmfile | velero) + [[ "${3}" =~ ^(wc|sc)$ ]] || usage + shift 2 + "${here}/ops.bash" "${2}" "${@}" ;; - kubeconfig) - shift - "${here}/kubeconfig.bash" "${@}" - ;; - completion) - [ -f "${here}/../completion/${2}" ] || usage - cat "${here}/../completion/${2}" - ;; - install-requirements) - shift - [ -f "${here}/install-requirements.bash" ] || usage - "${here}/install-requirements.bash" "${@}" - ;; - validate) - [[ "${2}" =~ ^(wc|sc)$ ]] || usage - check_tools - config_load "$2" - echo "Config validation successful" - ;; - providers) echo "${ck8s_cloud_providers[@]}" ;; - flavors) echo "${ck8s_flavors[@]}" ;; - k8s-installers) echo "${ck8s_k8s_installers[@]}" ;; - explain) - [[ "${2}" =~ ^(config|secrets)$ ]] || usage - shift - "${here}/explain.bash" "${@}" - ;; - update-ips) - [[ "${2}" =~ ^(wc|sc|both)$ ]] || usage - [[ "${3}" =~ ^(apply|dry-run)$ ]] || usage - "${here}/update-ips.bash" "${2}" "${3}" - ;; - fix-psp-violations) - [[ "${2}" =~ ^(wc|sc)$ ]] || usage - check_tools - export CK8S_CLUSTER="${2}" - "${here}/fix-psp-violations.bash" - ;; - clean) - "${here}/clean.bash" "${2}" - ;; - 
diagnostics) - [[ "${2}" =~ ^(wc|sc)$ ]] || usage - shift - "${here}/diagnostics.bash" "${@}" - ;; - *) usage ;; + *) usage ;; + esac + ;; +s3cmd) + shift + sops_exec_file "${secrets[s3cfg_file]}" 's3cmd --config="{}" '"${*}" + ;; +kubeconfig) + shift + "${here}/kubeconfig.bash" "${@}" + ;; +completion) + [ -f "${here}/../completion/${2}" ] || usage + cat "${here}/../completion/${2}" + ;; +install-requirements) + shift + "${here}/install-requirements.bash" "${@}" + ;; +validate) + [[ "${2}" =~ ^(wc|sc)$ ]] || usage + check_tools + config_load "$2" + echo "Config validation successful" + ;; +providers) echo "${ck8s_cloud_providers[@]}" ;; +flavors) echo "${ck8s_flavors[@]}" ;; +k8s-installers) echo "${ck8s_k8s_installers[@]}" ;; +explain) + [[ "${2}" =~ ^(config|secrets)$ ]] || usage + shift + "${here}/explain.bash" "${@}" + ;; +update-ips) + [[ "${2}" =~ ^(wc|sc|both)$ ]] || usage + [[ "${3}" =~ ^(apply|dry-run)$ ]] || usage + "${here}/update-ips.bash" "${2}" "${3}" + ;; +fix-psp-violations) + [[ "${2}" =~ ^(wc|sc)$ ]] || usage + check_tools + export CK8S_CLUSTER="${2}" + "${here}/fix-psp-violations.bash" + ;; +clean) + "${here}/clean.bash" "${2}" + ;; +diagnostics) + [[ "${2}" =~ ^(wc|sc)$ ]] || usage + shift + "${here}/diagnostics.bash" "${@}" + ;; +*) usage ;; esac diff --git a/bin/clean.bash b/bin/clean.bash index 0a02465f46..40d3027c41 100755 --- a/bin/clean.bash +++ b/bin/clean.bash @@ -3,8 +3,8 @@ set -eu -o pipefail usage() { - echo "Usage: clean " >&2 - exit 1 + echo "Usage: clean " >&2 + exit 1 } here="$(dirname "$(readlink -f "$0")")" @@ -14,7 +14,7 @@ source "${here}/common.bash" cluster="${1}" if [[ $cluster != "wc" && $cluster != "sc" ]]; then - usage + usage fi "${scripts_path}/clean-${cluster}.sh" diff --git a/bin/common.bash b/bin/common.bash index 389aa2796b..02fa727429 100644 --- a/bin/common.bash +++ b/bin/common.bash @@ -10,14 +10,14 @@ root_path="${here}/.." 
# shellcheck disable=SC2034 mapfile -t ck8s_cloud_providers < <(find "${root_path}/config/providers" -mindepth 1 -maxdepth 1 -type d -printf '%f\n') -ck8s_cloud_providers+=( "none" ) +ck8s_cloud_providers+=("none") # shellcheck disable=SC2034 mapfile -t ck8s_flavors < <(find "${root_path}/config/flavors" -mindepth 1 -maxdepth 1 -type d -printf '%f\n') # shellcheck disable=SC2034 mapfile -t ck8s_k8s_installers < <(find "${root_path}/config/k8s-installers" -mindepth 1 -maxdepth 1 -type d -printf '%f\n') -ck8s_k8s_installers+=( "none" ) +ck8s_k8s_installers+=("none") CK8S_AUTO_APPROVE=${CK8S_AUTO_APPROVE:-"false"} @@ -62,27 +62,27 @@ secrets["secrets_file"]="${CK8S_CONFIG_PATH}/secrets.yaml" secrets["s3cfg_file"]="${state_path}/s3cfg.ini" log_info_no_newline() { - echo -e -n "[\e[34mck8s\e[0m] ${*}" 1>&2 + echo -e -n "[\e[34mck8s\e[0m] ${*}" 1>&2 } log_info() { - log_info_no_newline "${*}\n" + log_info_no_newline "${*}\n" } log_warning_no_newline() { - echo -e -n "[\e[33mck8s\e[0m] ${*}" 1>&2 + echo -e -n "[\e[33mck8s\e[0m] ${*}" 1>&2 } log_warning() { - log_warning_no_newline "${*}\n" + log_warning_no_newline "${*}\n" } log_error_no_newline() { - echo -e -n "[\e[31mck8s\e[0m] ${*}" 1>&2 + echo -e -n "[\e[31mck8s\e[0m] ${*}" 1>&2 } log_error() { - log_error_no_newline "${*}\n" + log_error_no_newline "${*}\n" } log_fatal() { @@ -111,7 +111,7 @@ check_tools() { err=0 for executable in jq yq4 s3cmd sops kubectl helm helmfile dig pwgen htpasswd yajsv; do - if ! command -v "${executable}" > /dev/null; then + if ! 
command -v "${executable}" >/dev/null; then log_error "Required dependency ${executable} missing" err=1 fi @@ -127,8 +127,8 @@ check_tools() { local v1 local v2 - v1="$(sed -r -e 's/^v//' -e 's/\.[0-9](-[0-9])?$/\.\*/' -e 's/\./\\\./g' -e 's/\*/\.*/g' <<< "${1}")" - v2="$(sed -nr 's/.*([0-9]+\.[0-9]+\.[0-9]+).*/\1/p' <<< "${2}")" + v1="$(sed -r -e 's/^v//' -e 's/\.[0-9](-[0-9])?$/\.\*/' -e 's/\./\\\./g' -e 's/\*/\.*/g' <<<"${1}")" + v2="$(sed -nr 's/.*([0-9]+\.[0-9]+\.[0-9]+).*/\1/p' <<<"${2}")" if ! [[ "${v2}" =~ ${v1} ]]; then log_warning "Required dependency ${3} not using recommended version: (expected ${1##v} - actual ${v2})" @@ -137,7 +137,7 @@ check_tools() { } check_minor "$(echo "${req}" | jq -r '.["github.com/mikefarah/yq/v4"].version')" "$(yq4 --version)" yq4 - check_minor "$(echo "${req}" | jq -r '.["kubectl"].version')" "$(kubectl version -oyaml 2> /dev/null | yq4 '.clientVersion.gitVersion')" kubectl + check_minor "$(echo "${req}" | jq -r '.["kubectl"].version')" "$(kubectl version -oyaml 2>/dev/null | yq4 '.clientVersion.gitVersion')" kubectl check_minor "$(echo "${req}" | jq -r '.["helm.sh/helm/v3"].version')" "$(helm version --template='{{.Version}}')" helm check_minor "$(echo "${req}" | jq -r '.["github.com/helmfile/helmfile"].version')" "$(helmfile --version)" helmfile check_minor "$(echo "${req}" | jq -r '.["github.com/databus23/helm-diff/v3"].version')" "$(helm plugin list | grep diff)" "helm diff plugin" @@ -156,181 +156,181 @@ check_tools() { # Merges all yaml files in order # Usage: yq_merge yq_merge() { - # shellcheck disable=SC2016 - yq4 eval-all --prettyPrint 'explode(.) as $item ireduce ({}; . * $item )' "${@}" + # shellcheck disable=SC2016 + yq4 eval-all --prettyPrint 'explode(.) as $item ireduce ({}; . * $item )' "${@}" } # Reads the path to a block from one file containing the value # Usage: yq_read_block yq_read_block() { - source=$1 - value=$2 - # shellcheck disable=SC2140 - yq4 ".. | select(tag != \"!!map\" and . 
== \"${value}\") | path | with(.[]; . = (\"\\\"\" + .) + \"\\\"\" ) | \".\" + join \".\"" "${source}" | sed -r 's/\."[0-9]+".*//' | sed -r 's/\\//g' | uniq + source=$1 + value=$2 + # shellcheck disable=SC2140 + yq4 ".. | select(tag != \"!!map\" and . == \"${value}\") | path | with(.[]; . = (\"\\\"\" + .) + \"\\\"\" ) | \".\" + join \".\"" "${source}" | sed -r 's/\."[0-9]+".*//' | sed -r 's/\\//g' | uniq } # Copies a block from one file to another # Usage: yq_copy_block yq_copy_block() { - prefix=$(yq4 -n ".$3 | path | reverse | .[] as \$i ireduce(\".\"; \"{\\\"\" + \$i + \"\\\":\" + . + \"}\")") - yq4 ".${3}" "${1}" -o json | \ - yq4 "${prefix}" | \ + prefix=$(yq4 -n ".$3 | path | reverse | .[] as \$i ireduce(\".\"; \"{\\\"\" + \$i + \"\\\":\" + . + \"}\")") + yq4 ".${3}" "${1}" -o json | + yq4 "${prefix}" | yq4 eval-all 'select(fi == 0) * select(fi == 1)' -i "${2}" - --prettyPrint } # Usage: yq_copy_commons yq_copy_commons() { - source1=$1 - source2=$2 - target=$3 - - keys=$(yq_merge "${source1}" "${source2}" | yq4 '.. | select(tag != "!!map") | path | with(.[]; . = ("\"" + .) + "\"" ) | join "."' | sed -r 's/\."[0-9]+".*//' | sed -r 's/\\//g' | uniq) - for key in ${keys}; do - compare=$(diff <(yq4 -oj ".${key}" "${source1}" ) <(yq4 -oj ".${key}" "${source2}" ) || true) - if [[ -z "${compare}" ]]; then - value=$(yq4 ".${key}" "${source1}" ) - if [[ -z "${value}" ]]; then - log_error "Unknown key to copy from: ${key}" - exit 1 - fi - yq_copy_block "${source1}" "${target}" "${key}" - fi - done + source1=$1 + source2=$2 + target=$3 + + keys=$(yq_merge "${source1}" "${source2}" | yq4 '.. | select(tag != "!!map") | path | with(.[]; . = ("\"" + .) 
+ "\"" ) | join "."' | sed -r 's/\."[0-9]+".*//' | sed -r 's/\\//g' | uniq) + for key in ${keys}; do + compare=$(diff <(yq4 -oj ".${key}" "${source1}") <(yq4 -oj ".${key}" "${source2}") || true) + if [[ -z "${compare}" ]]; then + value=$(yq4 ".${key}" "${source1}") + if [[ -z "${value}" ]]; then + log_error "Unknown key to copy from: ${key}" + exit 1 + fi + yq_copy_block "${source1}" "${target}" "${key}" + fi + done } # Usage: yq_copy_changes yq_copy_changes() { - source1=$1 - source2=$2 - target=$3 - - keys=$(yq4 '.. | select(tag != "!!map" or (keys|length)==0) | path | with(.[]; . = ("\"" + .) + "\"" ) | join "."' "$source2" | sed -r 's/\."[0-9]+".*//' | uniq) - for key in ${keys}; do - compare=$(diff <(yq4 -oj ".${key}" "${source1}" ) <(yq4 -oj ".${key}" "${source2}" ) || true) - if [[ -n "${compare}" ]]; then - if [[ -n "$(yq4 ".${key} | select(tag == \"\") | alias" "${source2}")" ]]; then - # Creating placeholder for alias - yq4 -i ".${key} = {}" "${target}" - else - yq_copy_block "${source2}" "${target}" "${key}" - fi - fi - done + source1=$1 + source2=$2 + target=$3 + + keys=$(yq4 '.. | select(tag != "!!map" or (keys|length)==0) | path | with(.[]; . = ("\"" + .) + "\"" ) | join "."' "$source2" | sed -r 's/\."[0-9]+".*//' | uniq) + for key in ${keys}; do + compare=$(diff <(yq4 -oj ".${key}" "${source1}") <(yq4 -oj ".${key}" "${source2}") || true) + if [[ -n "${compare}" ]]; then + if [[ -n "$(yq4 ".${key} | select(tag == \"\") | alias" "${source2}")" ]]; then + # Creating placeholder for alias + yq4 -i ".${key} = {}" "${target}" + else + yq_copy_block "${source2}" "${target}" "${key}" + fi + fi + done - anchors="$(yq4 '.. | select(anchor != "") | path | with(.[]; . = ("\"" + .) 
+ "\"" ) | join "."' "${source2}")" - for anchor in ${anchors}; do - name="$(yq4 ".$anchor | anchor" "${source2}")" - # Protecting anchor from unwanted change - yq4 -i ".$anchor = (load(\"$source2\") | .$anchor)" "${target}" - # Putting anchor in place - yq4 -i ".$anchor anchor = \"$name\"" "${target}" - done + anchors="$(yq4 '.. | select(anchor != "") | path | with(.[]; . = ("\"" + .) + "\"" ) | join "."' "${source2}")" + for anchor in ${anchors}; do + name="$(yq4 ".$anchor | anchor" "${source2}")" + # Protecting anchor from unwanted change + yq4 -i ".$anchor = (load(\"$source2\") | .$anchor)" "${target}" + # Putting anchor in place + yq4 -i ".$anchor anchor = \"$name\"" "${target}" + done - # The alias function will return leaf values, but they don't have a tag so filter on those - aliases="$(yq4 '.. | select(tag == "") | alias | path | with(.[]; . = ("\"" + .) + "\"" ) | join "."' "${source2}")" - for alias in ${aliases}; do - name="$(yq4 ".$alias | alias" "${source2}")" - # Putting alias in place - yq4 -i ".$alias alias = \"$name\"" "${target}" - done + # The alias function will return leaf values, but they don't have a tag so filter on those + aliases="$(yq4 '.. | select(tag == "") | alias | path | with(.[]; . = ("\"" + .) 
+ "\"" ) | join "."' "${source2}")" + for alias in ${aliases}; do + name="$(yq4 ".$alias | alias" "${source2}")" + # Putting alias in place + yq4 -i ".$alias alias = \"$name\"" "${target}" + done } # Usage: yq_copy_values yq_copy_values() { - source1=$1 - source2=$2 - target=$3 - value=$4 - - keys=$(yq_read_block "${source1}" "${value}") - for key in ${keys}; do - compare=$(yq4 "${key}" "${source2}") - if [[ "${compare}" == "null" ]]; then - yq_copy_block "${source1}" "${target}" "${key:1}" - fi - done + source1=$1 + source2=$2 + target=$3 + value=$4 + + keys=$(yq_read_block "${source1}" "${value}") + for key in ${keys}; do + compare=$(yq4 "${key}" "${source2}") + if [[ "${compare}" == "null" ]]; then + yq_copy_block "${source1}" "${target}" "${key:1}" + fi + done } array_contains() { - local value="${1}" - shift - for element in "${@}"; do - [ "${element}" = "${value}" ] && return 0 - done - return 1 + local value="${1}" + shift + for element in "${@}"; do + [ "${element}" = "${value}" ] && return 0 + done + return 1 } -check_config() { - for config in "${@}"; do - if [[ ! -f "${config}" ]]; then - log_error "ERROR: could not find file ${config}" - exit 1 - elif [[ ! ${config} =~ ^.*\.(yaml|yml) ]]; then - log_error "ERROR: file ${config} must be a yaml file" - exit 1 - fi - done +check_config() { + for config in "${@}"; do + if [[ ! -f "${config}" ]]; then + log_error "ERROR: could not find file ${config}" + exit 1 + elif [[ ! ${config} =~ ^.*\.(yaml|yml) ]]; then + log_error "ERROR: file ${config} must be a yaml file" + exit 1 + fi + done } # Usage: merge_config # Merges the common-default, wc|sc-default, common-override, then wc|sc-override into one. merge_config() { - yq_merge "${config[default_common]}" "$1" "${config[override_common]}" "$2" > "$3" + yq_merge "${config[default_common]}" "$1" "${config[override_common]}" "$2" >"$3" } # Usage: load_config # Loads and merges the configuration into a usable tempfile at config[config_file_]. 
load_config() { - check_config "${config[default_common]}" "${config[override_common]}" - - if [[ "${1}" == "sc" ]]; then - check_config "${config[default_sc]}" "${config[override_sc]}" - config[config_file_sc]=$(mktemp --suffix="_sc-config.yaml") - append_trap "rm ${config[config_file_sc]}" EXIT - merge_config "${config[default_sc]}" "${config[override_sc]}" "${config[config_file_sc]}" - - elif [[ "${1}" == "wc" ]]; then - check_config "${config[default_wc]}" "${config[override_wc]}" - config[config_file_wc]=$(mktemp --suffix="_wc-config.yaml") - append_trap "rm ${config[config_file_wc]}" EXIT - merge_config "${config[default_wc]}" "${config[override_wc]}" "${config[config_file_wc]}" - - else - log_error "Error: usage load_config " - exit 1 - fi + check_config "${config[default_common]}" "${config[override_common]}" + + if [[ "${1}" == "sc" ]]; then + check_config "${config[default_sc]}" "${config[override_sc]}" + config[config_file_sc]=$(mktemp --suffix="_sc-config.yaml") + append_trap "rm ${config[config_file_sc]}" EXIT + merge_config "${config[default_sc]}" "${config[override_sc]}" "${config[config_file_sc]}" + + elif [[ "${1}" == "wc" ]]; then + check_config "${config[default_wc]}" "${config[override_wc]}" + config[config_file_wc]=$(mktemp --suffix="_wc-config.yaml") + append_trap "rm ${config[config_file_wc]}" EXIT + merge_config "${config[default_wc]}" "${config[override_wc]}" "${config[config_file_wc]}" + + else + log_error "Error: usage load_config " + exit 1 + fi } version_get() { - pushd "${root_path}" > /dev/null || exit 1 - git describe --exact-match --tags 2> /dev/null || git rev-parse HEAD - popd > /dev/null || exit 1 + pushd "${root_path}" >/dev/null || exit 1 + git describe --exact-match --tags 2>/dev/null || git rev-parse HEAD + popd >/dev/null || exit 1 } # Check if the config version matches the current CK8S version. 
# TODO: Simple hack to make sure version matches, we need to have a proper way # of making sure that the version is supported in the future. validate_version() { - version=$(version_get) - if [[ "${1}" == "sc" ]]; then - merged_config="${config[config_file_sc]}" - elif [[ "${1}" == "wc" ]]; then - merged_config="${config[config_file_wc]}" - else - echo log_error "Error: usage validate_version " - exit 1 - fi - ck8s_version=$(yq4 '.global.ck8sVersion' "${merged_config}") - if [[ -z "$ck8s_version" ]]; then - log_error "ERROR: No version set. Run init to generate config." - exit 1 - elif [ "${ck8s_version}" != "any" ] \ - && [ "${version}" != "${ck8s_version}" ]; then - log_error "ERROR: Version mismatch. Run init to update config." - log_error "Config version: ${ck8s_version}" - log_error "CK8S version: ${version}" - exit 1 - fi + version=$(version_get) + if [[ "${1}" == "sc" ]]; then + merged_config="${config[config_file_sc]}" + elif [[ "${1}" == "wc" ]]; then + merged_config="${config[config_file_wc]}" + else + echo log_error "Error: usage validate_version " + exit 1 + fi + ck8s_version=$(yq4 '.global.ck8sVersion' "${merged_config}") + if [[ -z "$ck8s_version" ]]; then + log_error "ERROR: No version set. Run init to generate config." + exit 1 + elif [ "${ck8s_version}" != "any" ] && + [ "${version}" != "${ck8s_version}" ]; then + log_error "ERROR: Version mismatch. Run init to update config." + log_error "Config version: ${ck8s_version}" + log_error "CK8S version: ${version}" + exit 1 + fi } # Make sure that all required configuration options are set in the config. @@ -338,174 +338,174 @@ validate_version() { # proper way of making sure that the configuration is valid in the # future. 
validate_config() { - log_info "Validating $1 config" - - check_conditionals() { - merged_config="${1}" - template_config="${2}" - - # Loop all lines in ${template_config} and checks if same option has conditional set-me in ${merged_config} - options="$(yq_read_block "${template_config}" "set-me-if-*")" - for opt in ${options}; do - opt_value="$(yq4 "${opt}" "${merged_config}")" - opt_value_no_list="$(yq4 "[.] | flatten | .[0]" <<< "${opt_value}")" - - if [[ "${opt_value_no_list}" =~ ^set-me-if-.*$ ]]; then - required_condition="$(sed -rn 's/^set-me-if-(.*)/\1/p' <<< "${opt_value_no_list}")" - if [[ "$(yq4 "${required_condition}" "${merged_config}")" == "true" ]]; then - # If the option is a list, set the first element in the list - if [[ "$(yq4 "${opt} | tag" "${merged_config}")" == "!!seq" ]]; then - yq4 "${opt}[0] = \"set-me\"" -i "${merged_config}" - yq4 "${opt}[0] = \"set-me\"" -i "${template_config}" - log_info "Set-me condition matched for ${opt}" - else - yq4 "${opt} = \"set-me\"" -i "${merged_config}" - yq4 "${opt} = \"set-me\"" -i "${template_config}" - log_info "Set-me condition matched for ${opt}" - fi - fi - fi - done - } - - validate() { - merged_config="${1}" - template_config="${2}" - - # Loop all lines in ${template_config} and warns if same option is not available in ${merged_config} - options=$(yq_read_block "${template_config}" "set-me") - for opt in ${options}; do - compare=$(diff <(yq4 -oj "${opt}" "${template_config}") <(yq4 -oj "${opt}" "${merged_config}") || true) - if [[ -z "${compare}" ]]; then - log_warning "WARN: ${opt} is not set in config" - maybe_exit="true" - fi - done - } - - schema_validate() { - merged_config="${1}" - schema_file="${2}" - - schema_validation_result="$(mktemp --suffix='.txt')" - append_trap "rm ${schema_validation_result}" EXIT - - if ! 
yajsv -s "${schema_file}" "${merged_config}" > "${schema_validation_result}"; then - log_warning "Failed schema validation:" - sed -r 's/^.*_(..-config\.yaml): fail: (.*)/\1: \2/; / failed validation$/q' < "${schema_validation_result}" - grep -oP '(?<=fail: )[^:]+' "${schema_validation_result}" | sort -u | - while read -r jpath; do - if [[ $jpath != "(root)" ]]; then - echo -n ".$jpath = " - yq4 -oj ".$jpath" "${merged_config}" - fi - done - maybe_exit="true" + log_info "Validating $1 config" + + check_conditionals() { + merged_config="${1}" + template_config="${2}" + + # Loop all lines in ${template_config} and checks if same option has conditional set-me in ${merged_config} + options="$(yq_read_block "${template_config}" "set-me-if-*")" + for opt in ${options}; do + opt_value="$(yq4 "${opt}" "${merged_config}")" + opt_value_no_list="$(yq4 "[.] | flatten | .[0]" <<<"${opt_value}")" + + if [[ "${opt_value_no_list}" =~ ^set-me-if-.*$ ]]; then + required_condition="$(sed -rn 's/^set-me-if-(.*)/\1/p' <<<"${opt_value_no_list}")" + if [[ "$(yq4 "${required_condition}" "${merged_config}")" == "true" ]]; then + # If the option is a list, set the first element in the list + if [[ "$(yq4 "${opt} | tag" "${merged_config}")" == "!!seq" ]]; then + yq4 "${opt}[0] = \"set-me\"" -i "${merged_config}" + yq4 "${opt}[0] = \"set-me\"" -i "${template_config}" + log_info "Set-me condition matched for ${opt}" + else + yq4 "${opt} = \"set-me\"" -i "${merged_config}" + yq4 "${opt} = \"set-me\"" -i "${template_config}" + log_info "Set-me condition matched for ${opt}" + fi fi - } - - template_file=$(mktemp --suffix="-tpl.yaml") - append_trap "rm ${template_file}" EXIT - - maybe_exit="false" - if [[ $1 == "sc" ]]; then - check_config "${config_template_path}/common-config.yaml" \ - "${config_template_path}/sc-config.yaml" \ - "${config_template_path}/secrets.yaml" - yq_merge "${config_template_path}/common-config.yaml" \ - "${config_template_path}/sc-config.yaml" \ - > "${template_file}" - 
config_to_validate="${config[config_file_sc]}" - elif [[ $1 == "wc" ]]; then - check_config "${config_template_path}/common-config.yaml" \ - "${config_template_path}/wc-config.yaml" \ - "${config_template_path}/secrets.yaml" - yq_merge "${config_template_path}/common-config.yaml" \ - "${config_template_path}/wc-config.yaml" \ - > "${template_file}" - config_to_validate="${config[config_file_wc]}" - else - log_error "ERROR: usage validate_config " - exit 1 - fi + fi + done + } - check_conditionals "${config_to_validate}" "${template_file}" - validate "${config_to_validate}" "${template_file}" - schema_validate "${config_to_validate}" "${config_template_path}/schemas/config.yaml" - check_conditionals "${secrets[secrets_file]}" "${config_template_path}/secrets.yaml" - validate "${secrets[secrets_file]}" "${config_template_path}/secrets.yaml" - schema_validate "${secrets[secrets_file]}" "${config_template_path}/schemas/secrets.yaml" + validate() { + merged_config="${1}" + template_config="${2}" + + # Loop all lines in ${template_config} and warns if same option is not available in ${merged_config} + options=$(yq_read_block "${template_config}" "set-me") + for opt in ${options}; do + compare=$(diff <(yq4 -oj "${opt}" "${template_config}") <(yq4 -oj "${opt}" "${merged_config}") || true) + if [[ -z "${compare}" ]]; then + log_warning "WARN: ${opt} is not set in config" + maybe_exit="true" + fi + done + } - if ${maybe_exit} && ! ${CK8S_AUTO_APPROVE}; then - ask_abort + schema_validate() { + merged_config="${1}" + schema_file="${2}" + + schema_validation_result="$(mktemp --suffix='.txt')" + append_trap "rm ${schema_validation_result}" EXIT + + if ! 
yajsv -s "${schema_file}" "${merged_config}" >"${schema_validation_result}"; then + log_warning "Failed schema validation:" + sed -r 's/^.*_(..-config\.yaml): fail: (.*)/\1: \2/; / failed validation$/q' <"${schema_validation_result}" + grep -oP '(?<=fail: )[^:]+' "${schema_validation_result}" | sort -u | + while read -r jpath; do + if [[ $jpath != "(root)" ]]; then + echo -n ".$jpath = " + yq4 -oj ".$jpath" "${merged_config}" + fi + done + maybe_exit="true" fi + } + + template_file=$(mktemp --suffix="-tpl.yaml") + append_trap "rm ${template_file}" EXIT + + maybe_exit="false" + if [[ $1 == "sc" ]]; then + check_config "${config_template_path}/common-config.yaml" \ + "${config_template_path}/sc-config.yaml" \ + "${config_template_path}/secrets.yaml" + yq_merge "${config_template_path}/common-config.yaml" \ + "${config_template_path}/sc-config.yaml" \ + >"${template_file}" + config_to_validate="${config[config_file_sc]}" + elif [[ $1 == "wc" ]]; then + check_config "${config_template_path}/common-config.yaml" \ + "${config_template_path}/wc-config.yaml" \ + "${config_template_path}/secrets.yaml" + yq_merge "${config_template_path}/common-config.yaml" \ + "${config_template_path}/wc-config.yaml" \ + >"${template_file}" + config_to_validate="${config[config_file_wc]}" + else + log_error "ERROR: usage validate_config " + exit 1 + fi + + check_conditionals "${config_to_validate}" "${template_file}" + validate "${config_to_validate}" "${template_file}" + schema_validate "${config_to_validate}" "${config_template_path}/schemas/config.yaml" + check_conditionals "${secrets[secrets_file]}" "${config_template_path}/secrets.yaml" + validate "${secrets[secrets_file]}" "${config_template_path}/secrets.yaml" + schema_validate "${secrets[secrets_file]}" "${config_template_path}/schemas/secrets.yaml" + + if ${maybe_exit} && ! ${CK8S_AUTO_APPROVE}; then + ask_abort + fi } validate_sops_config() { - if [ ! 
-f "${sops_config}" ]; then - log_error "ERROR: SOPS config not found: ${sops_config}" - exit 1 - fi + if [ ! -f "${sops_config}" ]; then + log_error "ERROR: SOPS config not found: ${sops_config}" + exit 1 + fi - rule_count=$(yq4 '.creation_rules | length' "${sops_config}") - if [ "${rule_count}" -eq 0 ]; then - log_error "ERROR: SOPS config contains no creation rules." - exit 1 - fi + rule_count=$(yq4 '.creation_rules | length' "${sops_config}") + if [ "${rule_count}" -eq 0 ]; then + log_error "ERROR: SOPS config contains no creation rules." + exit 1 + fi - # Compares the keyring with the sops config to see if the config has anything the keyring does not have. - keyring=$(gpg --with-colons --list-keys | awk -F: '/^pub:.*/ { getline; print $10 }') - creation_pgp=$(yq4 '[.creation_rules[].pgp // "" | split(",") | .[]] | unique | .[]' "${sops_config}") - # Pass keyring fingerprints twice to ensure other keys will not be flagged - fingerprints=$(tr ' ' '\n' <<< "${keyring} ${keyring} ${creation_pgp}" | sort | uniq -u) - - # Find rules ending with trailing comma - comma_search=$(yq4 '.creation_rules[] | select(.pgp == "*,")' "${sops_config}") - - if [ -n "${fingerprints// }" ] || [ "${comma_search: -1}" == "," ]; then - log_error "ERROR: SOPS config contains no or invalid PGP keys." - log_error "SOPS config: ${sops_config}:" - yq4 'split(" ") | {"missing or invalid fingerprints": .}' <<< "${fingerprints}" | cat - log_error "Fingerprints must be uppercase and separated by commas." - log_error "Recreate or edit the SOPS config to fix the issue" - exit 1 - fi + # Compares the keyring with the sops config to see if the config has anything the keyring does not have. 
+ keyring=$(gpg --with-colons --list-keys | awk -F: '/^pub:.*/ { getline; print $10 }') + creation_pgp=$(yq4 '[.creation_rules[].pgp // "" | split(",") | .[]] | unique | .[]' "${sops_config}") + # Pass keyring fingerprints twice to ensure other keys will not be flagged + fingerprints=$(tr ' ' '\n' <<<"${keyring} ${keyring} ${creation_pgp}" | sort | uniq -u) + + # Find rules ending with trailing comma + comma_search=$(yq4 '.creation_rules[] | select(.pgp == "*,")' "${sops_config}") + + if [ -n "${fingerprints// /}" ] || [ "${comma_search: -1}" == "," ]; then + log_error "ERROR: SOPS config contains no or invalid PGP keys." + log_error "SOPS config: ${sops_config}:" + yq4 'split(" ") | {"missing or invalid fingerprints": .}' <<<"${fingerprints}" | cat + log_error "Fingerprints must be uppercase and separated by commas." + log_error "Recreate or edit the SOPS config to fix the issue" + exit 1 + fi } # Load and validate all configuration options from the config path. # Usage: config_load [sk] config_load() { - load_config "$1" + load_config "$1" - if [[ "--skip-validation" != "${2:-''}" ]]; then - validate_version "$1" - validate_config "$1" - validate_sops_config - fi + if [[ "--skip-validation" != "${2:-''}" ]]; then + validate_version "$1" + validate_config "$1" + validate_sops_config + fi } # Normally a signal handler can only run one command. Use this to be able to # add multiple traps for a single signal. 
append_trap() { - cmd="${1}" - signal="${2}" + cmd="${1}" + signal="${2}" - if [ "$(trap -p "${signal}")" = "" ]; then - # shellcheck disable=SC2064 - trap "${cmd}" "${signal}" - return - fi + if [ "$(trap -p "${signal}")" = "" ]; then + # shellcheck disable=SC2064 + trap "${cmd}" "${signal}" + return + fi - # shellcheck disable=SC2317 - previous_trap_cmd() { printf '%s\n' "$3"; } + # shellcheck disable=SC2317 + previous_trap_cmd() { printf '%s\n' "$3"; } - new_trap() { - eval "previous_trap_cmd $(trap -p "${signal}")" - printf '%s\n' "${cmd}" - } + new_trap() { + eval "previous_trap_cmd $(trap -p "${signal}")" + printf '%s\n' "${cmd}" + } - # shellcheck disable=SC2064 - trap "$(new_trap)" "${signal}" + # shellcheck disable=SC2064 + trap "$(new_trap)" "${signal}" } sops_check() { @@ -514,40 +514,40 @@ sops_check() { # Write PGP fingerprints to SOPS config sops_config_write_fingerprints() { - yq4 -n ".creation_rules[0].pgp = \"${1}\"" > "${sops_config}" || \ - (log_error "ERROR: Failed to write fingerprints" && rm "${sops_config}" && exit 1) + yq4 -n ".creation_rules[0].pgp = \"${1}\"" >"${sops_config}" || + (log_error "ERROR: Failed to write fingerprints" && rm "${sops_config}" && exit 1) } # Encrypt stdin to file. If the file already exists it's overwritten. sops_encrypt_stdin() { - sops --config "${sops_config}" -e --input-type "${1}" --output-type "${1}" /dev/stdin > "${2}" + sops --config "${sops_config}" -e --input-type "${1}" --output-type "${1}" /dev/stdin >"${2}" } # Encrypt a file in place. 
sops_encrypt() { - # https://github.com/getsops/sops/issues/460 - if sops_check "${1}"; then - log_info "Already encrypted ${1}" - return - fi + # https://github.com/getsops/sops/issues/460 + if sops_check "${1}"; then + log_info "Already encrypted ${1}" + return + fi - log_info "Encrypting ${1}" + log_info "Encrypting ${1}" - sops --config "${sops_config}" -e -i "${1}" + sops --config "${sops_config}" -e -i "${1}" } # Check that a file exists and is actually encrypted using SOPS. sops_decrypt_verify() { - if [ ! -f "${1}" ]; then - log_error "ERROR: Encrypted file not found: ${1}" - exit 1 - fi + if [ ! -f "${1}" ]; then + log_error "ERROR: Encrypted file not found: ${1}" + exit 1 + fi - # https://github.com/getsops/sops/issues/460 - if ! sops_check "${1}"; then - log_error "NOT ENCRYPTED: ${1}" - exit 1 - fi + # https://github.com/getsops/sops/issues/460 + if ! sops_check "${1}"; then + log_error "NOT ENCRYPTED: ${1}" + exit 1 + fi } # Decrypt a file in place and encrypt it again at exit. @@ -561,19 +561,19 @@ sops_decrypt_verify() { # TODO: This is bad since it makes the decrypted secrets touch the filesystem. # We should try to remove this asap. sops_decrypt() { - log_info "Decrypting ${1}" + log_info "Decrypting ${1}" - sops_decrypt_verify "${1}" + sops_decrypt_verify "${1}" - sops --config "${sops_config}" -d -i "${1}" - append_trap "sops_encrypt ${1}" EXIT + sops --config "${sops_config}" -d -i "${1}" + append_trap "sops_encrypt ${1}" EXIT } # Temporarily decrypts a file and runs a command that can read it once. sops_exec_file() { - sops_decrypt_verify "${1}" + sops_decrypt_verify "${1}" - sops --config "${sops_config}" exec-file "${1}" "${2}" + sops --config "${sops_config}" exec-file "${1}" "${2}" } # The same as sops_exec_file except the decrypted file is written as a normal @@ -582,58 +582,57 @@ sops_exec_file() { # decrypted file needs to be read more than once. # TODO: Try to eliminate this in the future. 
sops_exec_file_no_fifo() { - sops_decrypt_verify "${1}" + sops_decrypt_verify "${1}" - sops --config "${sops_config}" exec-file --no-fifo "${1}" "${2}" + sops --config "${sops_config}" exec-file --no-fifo "${1}" "${2}" } # Temporarily decrypts a file and loads the content as environment variables # that will only be available to a command. sops_exec_env() { - sops_decrypt_verify "${1}" + sops_decrypt_verify "${1}" - sops --config "${sops_config}" exec-env "${1}" "${2}" + sops --config "${sops_config}" exec-env "${1}" "${2}" } # Run a command with the secrets config options available as environment # variables. with_config_secrets() { - sops_decrypt_verify "${secrets[secrets_file]}" + sops_decrypt_verify "${secrets[secrets_file]}" - sops_exec_env "${secrets[secrets_file]}" "${*}" + sops_exec_env "${secrets[secrets_file]}" "${*}" } - # Run a command with KUBECONFIG set to a temporarily decrypted file. with_kubeconfig() { - kubeconfig="${1}" - shift + kubeconfig="${1}" + shift - if [ ! -f "${kubeconfig}" ]; then - log_error "ERROR: Kubeconfig not found: ${kubeconfig}" - exit 1 - fi + if [ ! -f "${kubeconfig}" ]; then + log_error "ERROR: Kubeconfig not found: ${kubeconfig}" + exit 1 + fi - if sops_check "${kubeconfig}"; then - log_info "Using encrypted kubeconfig ${kubeconfig}" - - # TODO: Can't use a FIFO since we can't know that the kubeconfig is not - # read multiple times. Let's try to eliminate the need for writing - # the kubeconfig to disk in the future. - sops_exec_file_no_fifo "${kubeconfig}" 'KUBECONFIG="{}" '"${*}" - else - log_info "Using unencrypted kubeconfig ${kubeconfig}" - # shellcheck disable=SC2048 - KUBECONFIG=${kubeconfig} "$@" - fi + if sops_check "${kubeconfig}"; then + log_info "Using encrypted kubeconfig ${kubeconfig}" + + # TODO: Can't use a FIFO since we can't know that the kubeconfig is not + # read multiple times. Let's try to eliminate the need for writing + # the kubeconfig to disk in the future. 
+ sops_exec_file_no_fifo "${kubeconfig}" 'KUBECONFIG="{}" '"${*}" + else + log_info "Using unencrypted kubeconfig ${kubeconfig}" + # shellcheck disable=SC2048 + KUBECONFIG=${kubeconfig} "$@" + fi } # Runs a command with S3COMMAND_CONFIG_FILE set to a temporarily decrypted # file. with_s3cfg() { - s3cfg="${1}" - shift - # TODO: Can't use a FIFO since the s3cfg is read multiple times when a - # bucket needs to be created. - sops_exec_file_no_fifo "${s3cfg}" 'S3COMMAND_CONFIG_FILE="{}" '"${*}" + s3cfg="${1}" + shift + # TODO: Can't use a FIFO since the s3cfg is read multiple times when a + # bucket needs to be created. + sops_exec_file_no_fifo "${s3cfg}" 'S3COMMAND_CONFIG_FILE="{}" '"${*}" } diff --git a/bin/diagnostics.bash b/bin/diagnostics.bash index b5224448b6..201ab12b50 100755 --- a/bin/diagnostics.bash +++ b/bin/diagnostics.bash @@ -7,22 +7,22 @@ here="$(dirname "$(readlink -f "$0")")" source "${here}/common.bash" usage() { - log_info "usage: ck8s diagnostics [command] [options]" - log_info "" - log_info "Collects diagnostics from the current environment set by CK8S_CONFIG_PATH and" - log_info "store them in a file in the CK8S_CONFIG_PATH directory encrypted with SOPS using" - log_info "by default GPG keys found in CK8S_CONFIG_PATH/diagnostics_receiver.gpg or by" - log_info "setting the CK8S_PGP_FP environment variable manually." 
- log_info "" - log_info "Commands:" - log_info " namespace run diagnostics for specified namespace only" - log_info " query-default-metrics-since query a predefined set of metrics since the specified date" - log_info " query-metric query any arbitrary metric" - log_info "" - log_info "Global options:" - log_info " -h, --help display help for this command and exit" - log_info " --include-config include config yaml files found in CK8S_CONFIG_PATH" - exit 1 + log_info "usage: ck8s diagnostics [command] [options]" + log_info "" + log_info "Collects diagnostics from the current environment set by CK8S_CONFIG_PATH and" + log_info "store them in a file in the CK8S_CONFIG_PATH directory encrypted with SOPS using" + log_info "by default GPG keys found in CK8S_CONFIG_PATH/diagnostics_receiver.gpg or by" + log_info "setting the CK8S_PGP_FP environment variable manually." + log_info "" + log_info "Commands:" + log_info " namespace run diagnostics for specified namespace only" + log_info " query-default-metrics-since query a predefined set of metrics since the specified date" + log_info " query-metric query any arbitrary metric" + log_info "" + log_info "Global options:" + log_info " -h, --help display help for this command and exit" + log_info " --include-config include config yaml files found in CK8S_CONFIG_PATH" + exit 1 } gpg_file="${CK8S_CONFIG_PATH}/diagnostics_receiver.gpg" @@ -33,56 +33,57 @@ command_arg="" shift -while [ "${#}" -gt 0 ] ; do - case "${1}" in - -h | --help) - usage - ;; - --include-config) - include_config=true - ;; - namespace|query-default-metrics-since|query-metric) - [[ ${#} -ge 2 && "${2}" != -* && -z "$sub_command" ]] || usage - sub_command="${1:-}" - command_arg="${2:-}" - shift - ;; - *) - log_error "ERROR: invalid argument: \"${1:-}\"" - usage - ;; - esac +while [ "${#}" -gt 0 ]; do + case "${1}" in + -h | --help) + usage + ;; + --include-config) + include_config=true + ;; + namespace | query-default-metrics-since | query-metric) + [[ ${#} -ge 
2 && "${2}" != -* && -z "$sub_command" ]] || usage + sub_command="${1:-}" + command_arg="${2:-}" shift + ;; + *) + log_error "ERROR: invalid argument: \"${1:-}\"" + usage + ;; + esac + shift done log_self_managed_notice() { - log_warning "WARNING: Notice for self-managed customers:" + log_warning "WARNING: Notice for self-managed customers:" - echo -e "\tIf you are an Elastisys self-managed customer, you can send diagnostic data to Elastisys." 1>&2 - echo -e "\tMake sure to store GPG keys retrieved during onboarding in a file named:\n" 1>&2 - echo -e "\t\${CK8S_CONFIG_PATH}/diagnostics_receiver.gpg\n" 1>&2 + echo -e "\tIf you are an Elastisys self-managed customer, you can send diagnostic data to Elastisys." 1>&2 + echo -e "\tMake sure to store GPG keys retrieved during onboarding in a file named:\n" 1>&2 + echo -e "\t\${CK8S_CONFIG_PATH}/diagnostics_receiver.gpg\n" 1>&2 - echo -e "\tIf you are an Elastisys self-managed customer, you get support by contacting sme-support@elastisys.com\n" 1>&2 + echo -e "\tIf you are an Elastisys self-managed customer, you get support by contacting sme-support@elastisys.com\n" 1>&2 - usage + usage } import_gpg_file() { - local fingerprints - local gpg_file="${1}" - if [[ ! -f "${gpg_file}" ]]; then - log_error "ERROR: file \"${gpg_file}\" not found" - log_self_managed_notice - fi - log_info "Attempting to import GPG keys from ${gpg_file}" - - if ! gpg --import "${gpg_file}"; then - log_error "ERROR: Could not import GPG keys from ${gpg_file}" - log_self_managed_notice - fi - - # get only fingerprints used for encryption - mapfile -t fingerprints < <(gpg --with-colons --import-options show-only --import --fingerprint "${CK8S_CONFIG_PATH}/diagnostics_receiver.gpg" | awk -F: ' + local fingerprints + local gpg_file="${1}" + if [[ ! -f "${gpg_file}" ]]; then + log_error "ERROR: file \"${gpg_file}\" not found" + log_self_managed_notice + fi + log_info "Attempting to import GPG keys from ${gpg_file}" + + if ! 
gpg --import "${gpg_file}"; then + log_error "ERROR: Could not import GPG keys from ${gpg_file}" + log_self_managed_notice + fi + + # get only fingerprints used for encryption + mapfile -t fingerprints < <( + gpg --with-colons --import-options show-only --import --fingerprint "${CK8S_CONFIG_PATH}/diagnostics_receiver.gpg" | awk -F: ' /^fpr/ { if (!main_fpr) { print $10; @@ -92,318 +93,321 @@ import_gpg_file() { /^pub/ { main_fpr = 0; }' - ) - CK8S_PGP_FP=$(IFS=, ; echo "${fingerprints[*]}") + ) + CK8S_PGP_FP=$( + IFS=, + echo "${fingerprints[*]}" + ) } sops_encrypt_file() { - if [ -z "${CK8S_PGP_FP:-}" ]; then - sops_encrypt "${file}" - return - fi + if [ -z "${CK8S_PGP_FP:-}" ]; then + sops_encrypt "${file}" + return + fi - log_info "Encrypting ${file}" + log_info "Encrypting ${file}" - sops --pgp "${CK8S_PGP_FP}" -e -i "${file}" + sops --pgp "${CK8S_PGP_FP}" -e -i "${file}" } fetch_oidc_token() { - # shellcheck disable=SC2016 - readarray -t args <<< "$(yq4 '. as $root | ($root.contexts[] | select(.name == $root.current-context) | .context) as $context | ($root.users[] | select(.name == $context.user) | .user) as $user | $user.exec.args[]' "${config["kube_config_sc"]}")" - [[ "${args[0]}" == "oidc-login" ]] || log_fatal "ERROR: This command requires the kubeconfig to use OIDC" - kubectl "${args[@]}" | yq4 '.status.token' + # shellcheck disable=SC2016 + readarray -t args <<<"$(yq4 '. 
as $root | ($root.contexts[] | select(.name == $root.current-context) | .context) as $context | ($root.users[] | select(.name == $context.user) | .user) as $user | $user.exec.args[]' "${config["kube_config_sc"]}")" + [[ "${args[0]}" == "oidc-login" ]] || log_fatal "ERROR: This command requires the kubeconfig to use OIDC" + kubectl "${args[@]}" | yq4 '.status.token' } run_diagnostics() { - # -- ck8s -- - echo "Fetching CK8S software versions" - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - if [ -d "${CK8S_CONFIG_PATH}/capi" ]; then - # shellcheck disable=SC2002 - capi_version=$(cat "${CK8S_CONFIG_PATH}/capi/defaults/values.yaml" | yq4 '.clusterApiVersion') - echo "CAPI version: ${capi_version}" - elif [ -d "${CK8S_CONFIG_PATH}/${cluster}-config" ]; then - # shellcheck disable=SC2002 - kubespray_version=$(cat "${CK8S_CONFIG_PATH}/${cluster}-config/group_vars/all/ck8s-kubespray-general.yaml" | yq4 '.ck8sKubesprayVersion') - echo "Kubespray version: ${kubespray_version}" - else - echo "Can't find config directory" - fi - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + # -- ck8s -- + echo "Fetching CK8S software versions" + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + if [ -d "${CK8S_CONFIG_PATH}/capi" ]; then # shellcheck disable=SC2002 - apps_version=$(cat "${CK8S_CONFIG_PATH}/defaults/common-config.yaml" | yq4 '.global.ck8sVersion') - echo "Apps version: ${apps_version}" - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - # -- Nodes -- - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - echo "Fetching Nodes that are NotReady ()" - nodes=$("${here}/ops.bash" kubectl "${cluster}" get nodes -o=yaml | yq4 '.items[] | select(.status.conditions[] | select(.type == "Ready" and .status != "True")) | .metadata.name' | tr '\n' ' ') - if [ -z "${nodes}" ]; then - echo -e "All Nodes are ready" - else - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - echo "${nodes}" | xargs "${here}/ops.bash" kubectl "${cluster}" get 
nodes -o wide - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - echo -e "\nDescribing Nodes" - echo "${nodes}" | xargs "${here}/ops.bash" kubectl "${cluster}" describe nodes - fi - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - # -- DS and Deployments -- - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - echo -e "\nFetching Deployments without desired number of ready pods ()" - deployments=$("${here}"/ops.bash kubectl "${cluster}" get deployments -A -o=yaml | yq4 '.items[] | select(.status.conditions[] | select((.type == "Progressing" and .status != "True") or (.type == "Available" and .status != "True")))') - if [ -z "${deployments}" ]; then - echo -e "All Deployments are ready" - else - echo "${deployments}" - fi - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - - echo -e "\nFetching DaemonSets without desired number of ready pods ()" - daemonsets=$("${here}"/ops.bash kubectl "${cluster}" get daemonsets -A -o=yaml | yq4 '.items[] | select(.status.numberMisscheduled != 0)') - if [ -z "${daemonsets}" ]; then - echo -e "All daemonsets are ready" - else - echo "${daemonsets}" - fi - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - - echo -e "\nFetching StatefulSets without desired number of ready pods ()" - statefulsets=$("${here}"/ops.bash kubectl "${cluster}" get statefulsets -A -o=yaml | yq4 '.items[] | select(.status.collisionCount != 0 and .status.readyReplicas != .status.updatedReplicas and .status.replicas != .status.readyReplicas)') - if [ -z "${statefulsets}" ]; then - echo -e "All statefulsets are ready" - else - echo "${statefulsets}" - fi - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - - # -- Pods -- - echo -e "\nFetching Pods that are NotReady ()" - pods=$("${here}/ops.bash" kubectl "${cluster}" get pod -A -o=yaml | yq4 '.items[] | select(.status.conditions[] | select(.type == "Ready" and .status != "True" and .reason != "PodCompleted")) | [{"name": .metadata.name, "namespace": 
.metadata.namespace}]') - readarray pod_arr < <(echo "$pods" | yq4 e -o=j -I=0 '.[]') - - if [ "${pods}" == '[]' ]; then - echo -e "All pods are ready" - else - for pod in "${pod_arr[@]}"; do - pod_name=$(echo "$pod" | jq -r '.name') - namespace=$(echo "$pod" | jq -r '.namespace') - - echo -e "\nDescribing pod <${pod_name}>" - "${here}/ops.bash" kubectl "${cluster}" describe pod "${pod_name}" -n "${namespace}" - - echo -e "\nGetting logs from pod: <${pod_name}>" - logs=$("${here}/ops.bash" kubectl "${cluster}" logs "${pod_name}" -n "${namespace}" --tail 20 || true) - status="$?" - if [ "${status}" -eq 0 ]; then - echo "${logs}" - fi - - echo -e "\nGetting previous logs from pod: <${pod_name}>" - logs_prev=$("${here}/ops.bash" kubectl "${cluster}" logs -p "${pod_name}" -n "${namespace}" --tail 20 || true) - status="$?" - if [ "${status}" -eq 0 ]; then - echo "${logs_prev}" - fi - done - fi - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - - # -- Top -- - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - echo -e "\nFetching cluster resource usage " - "${here}/ops.bash" kubectl "${cluster}" top nodes - "${here}/ops.bash" kubectl "${cluster}" top pods -A - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - - # -- Helm -- - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - echo -e "\nFetching Helm releases that are not deployed ()" - helm=$("${here}"/ops.bash helm "${cluster}" list -A --all -o yaml | yq4 '.[] | select(.status != "deployed")') - if [ -z "${helm}" ]; then - echo -e "All charts are deployed" - else - echo "${helm}" - fi - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - - # -- Cert -- - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - echo -e "\nFetching cert-manager resources ()" - "${here}/ops.bash" kubectl "${cluster}" get clusterissuers,issuers,certificates,orders,challenges --all-namespaces -o wide - - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - echo -e "\nDescribing failed Challenges 
()" - challenges=$("${here}/ops.bash" kubectl "${cluster}" get challenge -A -o=yaml | yq4 '.items[] | select(.status.state != "valid") | [{"name": .metadata.name, "namespace": .metadata.namespace}]') - readarray challenge_arr < <(echo "$challenges" | yq4 e -o=j -I=0 '.[]') - if [ "${challenges}" == '[]' ]; then - echo -e "All challenges are valid" - else - for challenge in "${challenge_arr[@]}"; do - challenge_name=$(echo "$challenge" | jq -r '.name') - namespace=$(echo "$challenge" | jq -r '.namespace') - "${here}/ops.bash" kubectl "${cluster}" describe challenge "${challenge_name}" -n "${namespace}" - done - fi - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - - # -- Events -- - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - echo -e "\nFetching all Events ()" - "${here}/ops.bash" kubectl "${cluster}" get events -A --sort-by=.metadata.creationTimestamp - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - - # # -- Test -- - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - "${here}/ck8s" test "${cluster}" + capi_version=$(cat "${CK8S_CONFIG_PATH}/capi/defaults/values.yaml" | yq4 '.clusterApiVersion') + echo "CAPI version: ${capi_version}" + elif [ -d "${CK8S_CONFIG_PATH}/${cluster}-config" ]; then + # shellcheck disable=SC2002 + kubespray_version=$(cat "${CK8S_CONFIG_PATH}/${cluster}-config/group_vars/all/ck8s-kubespray-general.yaml" | yq4 '.ck8sKubesprayVersion') + echo "Kubespray version: ${kubespray_version}" + else + echo "Can't find config directory" + fi + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + # shellcheck disable=SC2002 + apps_version=$(cat "${CK8S_CONFIG_PATH}/defaults/common-config.yaml" | yq4 '.global.ck8sVersion') + echo "Apps version: ${apps_version}" + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + # -- Nodes -- + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + echo "Fetching Nodes that are NotReady ()" + nodes=$("${here}/ops.bash" kubectl "${cluster}" get nodes -o=yaml | yq4 
'.items[] | select(.status.conditions[] | select(.type == "Ready" and .status != "True")) | .metadata.name' | tr '\n' ' ') + if [ -z "${nodes}" ]; then + echo -e "All Nodes are ready" + else + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + echo "${nodes}" | xargs "${here}/ops.bash" kubectl "${cluster}" get nodes -o wide + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + echo -e "\nDescribing Nodes" + echo "${nodes}" | xargs "${here}/ops.bash" kubectl "${cluster}" describe nodes + fi + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + # -- DS and Deployments -- + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + echo -e "\nFetching Deployments without desired number of ready pods ()" + deployments=$("${here}"/ops.bash kubectl "${cluster}" get deployments -A -o=yaml | yq4 '.items[] | select(.status.conditions[] | select((.type == "Progressing" and .status != "True") or (.type == "Available" and .status != "True")))') + if [ -z "${deployments}" ]; then + echo -e "All Deployments are ready" + else + echo "${deployments}" + fi + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + + echo -e "\nFetching DaemonSets without desired number of ready pods ()" + daemonsets=$("${here}"/ops.bash kubectl "${cluster}" get daemonsets -A -o=yaml | yq4 '.items[] | select(.status.numberMisscheduled != 0)') + if [ -z "${daemonsets}" ]; then + echo -e "All daemonsets are ready" + else + echo "${daemonsets}" + fi + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + + echo -e "\nFetching StatefulSets without desired number of ready pods ()" + statefulsets=$("${here}"/ops.bash kubectl "${cluster}" get statefulsets -A -o=yaml | yq4 '.items[] | select(.status.collisionCount != 0 and .status.readyReplicas != .status.updatedReplicas and .status.replicas != .status.readyReplicas)') + if [ -z "${statefulsets}" ]; then + echo -e "All statefulsets are ready" + else + echo "${statefulsets}" + fi + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' 
- + + # -- Pods -- + echo -e "\nFetching Pods that are NotReady ()" + pods=$("${here}/ops.bash" kubectl "${cluster}" get pod -A -o=yaml | yq4 '.items[] | select(.status.conditions[] | select(.type == "Ready" and .status != "True" and .reason != "PodCompleted")) | [{"name": .metadata.name, "namespace": .metadata.namespace}]') + readarray pod_arr < <(echo "$pods" | yq4 e -o=j -I=0 '.[]') + + if [ "${pods}" == '[]' ]; then + echo -e "All pods are ready" + else + for pod in "${pod_arr[@]}"; do + pod_name=$(echo "$pod" | jq -r '.name') + namespace=$(echo "$pod" | jq -r '.namespace') + + echo -e "\nDescribing pod <${pod_name}>" + "${here}/ops.bash" kubectl "${cluster}" describe pod "${pod_name}" -n "${namespace}" + + echo -e "\nGetting logs from pod: <${pod_name}>" + logs=$("${here}/ops.bash" kubectl "${cluster}" logs "${pod_name}" -n "${namespace}" --tail 20 || true) + status="$?" + if [ "${status}" -eq 0 ]; then + echo "${logs}" + fi + + echo -e "\nGetting previous logs from pod: <${pod_name}>" + logs_prev=$("${here}/ops.bash" kubectl "${cluster}" logs -p "${pod_name}" -n "${namespace}" --tail 20 || true) + status="$?" 
+ if [ "${status}" -eq 0 ]; then + echo "${logs_prev}" + fi + done + fi + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + + # -- Top -- + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + echo -e "\nFetching cluster resource usage " + "${here}/ops.bash" kubectl "${cluster}" top nodes + "${here}/ops.bash" kubectl "${cluster}" top pods -A + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + + # -- Helm -- + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + echo -e "\nFetching Helm releases that are not deployed ()" + helm=$("${here}"/ops.bash helm "${cluster}" list -A --all -o yaml | yq4 '.[] | select(.status != "deployed")') + if [ -z "${helm}" ]; then + echo -e "All charts are deployed" + else + echo "${helm}" + fi + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + + # -- Cert -- + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + echo -e "\nFetching cert-manager resources ()" + "${here}/ops.bash" kubectl "${cluster}" get clusterissuers,issuers,certificates,orders,challenges --all-namespaces -o wide + + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + echo -e "\nDescribing failed Challenges ()" + challenges=$("${here}/ops.bash" kubectl "${cluster}" get challenge -A -o=yaml | yq4 '.items[] | select(.status.state != "valid") | [{"name": .metadata.name, "namespace": .metadata.namespace}]') + readarray challenge_arr < <(echo "$challenges" | yq4 e -o=j -I=0 '.[]') + if [ "${challenges}" == '[]' ]; then + echo -e "All challenges are valid" + else + for challenge in "${challenge_arr[@]}"; do + challenge_name=$(echo "$challenge" | jq -r '.name') + namespace=$(echo "$challenge" | jq -r '.namespace') + "${here}/ops.bash" kubectl "${cluster}" describe challenge "${challenge_name}" -n "${namespace}" + done + fi + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + + # -- Events -- + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + echo -e "\nFetching all Events ()" + "${here}/ops.bash" kubectl 
"${cluster}" get events -A --sort-by=.metadata.creationTimestamp + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + + # # -- Test -- + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + "${here}/ck8s" test "${cluster}" } run_diagnostics_namespaced() { - namespace="${1}" - echo "Running in the ${namespace} namespace" - # -- Pods -- - echo -e "Fetching All Pods " - "${here}/ops.bash" kubectl "${cluster}" get pods -n "${namespace}" -o yaml - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - - # -- Top -- - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - echo "Fetching Pods Resources Usage " - "${here}/ops.bash" kubectl "${cluster}" top pods -n "${namespace}" - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - - # -- Deployments -- - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - echo "Fetching Deployments " - "${here}/ops.bash" kubectl "${cluster}" get deployments -n "${namespace}" -o yaml - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - - # -- Daemonsets -- - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - echo "Fetching Daemonsets " - "${here}/ops.bash" kubectl "${cluster}" get daemonsets -n "${namespace}" -o yaml - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - - # -- Statefulsets -- - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - echo "Fetching Statefulsets " - "${here}/ops.bash" kubectl "${cluster}" get statefulsets -n "${namespace}" -o yaml - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - - # -- Events -- - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - echo "Fetching Events " - "${here}/ops.bash" kubectl "${cluster}" get events -n "${namespace}" - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - - # -- ConfigMaps -- - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - echo "Fetching ConfigMaps " - "${here}/ops.bash" kubectl "${cluster}" get configmaps -n "${namespace}" -o yaml - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr 
' ' - - - # -- Logs -- - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - echo "Fetching Error Logs " - mapfile -t pods_arr < <("${here}/ops.bash" kubectl "${cluster}" get pods -n "${namespace}" -o yaml | yq4 '.items[] | .metadata.name') - for pod in "${pods_arr[@]}"; do - echo "Error logs for pod: ${pod}" - "${here}/ops.bash" kubectl "${cluster}" logs -n "${namespace}" "${pod}" | \ - # need 'OR true' here to not exit script in case of no match - grep -iE '401|403|500|bad|blocked|denied|deny|err|expired|fail|unauthorized|unknown' || true - done + namespace="${1}" + echo "Running in the ${namespace} namespace" + # -- Pods -- + echo -e "Fetching All Pods " + "${here}/ops.bash" kubectl "${cluster}" get pods -n "${namespace}" -o yaml + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + + # -- Top -- + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + echo "Fetching Pods Resources Usage " + "${here}/ops.bash" kubectl "${cluster}" top pods -n "${namespace}" + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + + # -- Deployments -- + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + echo "Fetching Deployments " + "${here}/ops.bash" kubectl "${cluster}" get deployments -n "${namespace}" -o yaml + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + + # -- Daemonsets -- + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + echo "Fetching Daemonsets " + "${here}/ops.bash" kubectl "${cluster}" get daemonsets -n "${namespace}" -o yaml + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + + # -- Statefulsets -- + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + echo "Fetching Statefulsets " + "${here}/ops.bash" kubectl "${cluster}" get statefulsets -n "${namespace}" -o yaml + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + + # -- Events -- + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + echo "Fetching Events " + "${here}/ops.bash" kubectl "${cluster}" get events -n "${namespace}" + printf 
'%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + + # -- ConfigMaps -- + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + echo "Fetching ConfigMaps " + "${here}/ops.bash" kubectl "${cluster}" get configmaps -n "${namespace}" -o yaml + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + + # -- Logs -- + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + echo "Fetching Error Logs " + mapfile -t pods_arr < <("${here}/ops.bash" kubectl "${cluster}" get pods -n "${namespace}" -o yaml | yq4 '.items[] | .metadata.name') + for pod in "${pods_arr[@]}"; do + echo "Error logs for pod: ${pod}" + "${here}/ops.bash" kubectl "${cluster}" logs -n "${namespace}" "${pod}" | + # need 'OR true' here to not exit script in case of no match + grep -iE '401|403|500|bad|blocked|denied|deny|err|expired|fail|unauthorized|unknown' || true + done } get_config_files() { - mapfile -t config_files < <(find "${CK8S_CONFIG_PATH}" -name "*-config.yaml") - - for config_file in "${config_files[@]}"; do - printf '\n%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - echo -n "Config file: " - if [[ $(basename "$(dirname "${config_file}")") == "defaults" ]]; then - echo "defaults/$(basename "${config_file}")" - else - basename "${config_file}" - fi - cat "${config_file}" - done + mapfile -t config_files < <(find "${CK8S_CONFIG_PATH}" -name "*-config.yaml") + + for config_file in "${config_files[@]}"; do + printf '\n%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + echo -n "Config file: " + if [[ $(basename "$(dirname "${config_file}")") == "defaults" ]]; then + echo "defaults/$(basename "${config_file}")" + else + basename "${config_file}" + fi + cat "${config_file}" + done } run_diagnostics_default_metrics() { - token="$(fetch_oidc_token)" - domain="https://kube.$(yq4 '.global.opsDomain' "${config["config_file_sc"]}"):6443" - endpoint="${domain}/api/v1/namespaces/thanos/services/thanos-query-query-frontend:9090/proxy/api/v1" - header="Authorization: Bearer ${token}" - 
range_arg=("--data-urlencode" "start=$(date -d -"${1}" -Iseconds)" "--data-urlencode" "end=$(date -Iseconds)" "--data-urlencode" "step=1m") - - query_and_parse() { - query="${1}" - print_func="${2}" - res="$(curl "${endpoint}/query_range" -k -s --header "${header}" --data-urlencode query="${query}" "${range_arg[@]}")" - if [[ $(jq '.data.result | length' <<< "${res}") -gt 0 ]]; then - readarray metric_results_arr < <(jq -c '.data.result[]' <<< "${res}") - for row in "${metric_results_arr[@]}"; do - "${print_func}" "${row}" - done - fi - } - - print_fluentd() { - echo "Fluentd output error rate over 0 on the dates:" - jq '.values[][0]' <<< "${1}" | xargs -I {} date -d@{} - echo - } - - # Fluentd output error/retry rate - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - echo "Querying fluentd output error rate." - query_and_parse 'sum(rate(fluentd_output_status_retry_count[1m])) > 0' print_fluentd - - print_dropped_packages() { - direction="$([[ $(jq -r .metric.type <<< "${1}") == "fw" ]] && echo "from" || echo "to")" - pod="$(jq '.metric.exported_pod' <<< "${1}")" - echo "Found dropped packages going ${direction} pod: ${pod} on dates:" - jq '.values[][0]' <<< "${1}" | xargs -I {} date -d@{} - echo - } - - # Dropped packets going from pod - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - echo "Querying dropped packages." - query_and_parse 'rate(no_policy_drop_counter[1m]) > 0' print_dropped_packages - - print_uptime() { - echo "The target $(jq '.metric.target' <<< "${1}") was down on dates:" - jq '.values[][0]' <<< "${1}" | xargs -I {} date -d@{} - echo - } - - # Uptime - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - echo "Querying uptime." 
- query_and_parse 'max by (target) (probe_success) < 1' print_uptime - - # Opensearch status - printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - - echo "Querying Opensearch cluster status" - res="$(curl "${endpoint}/query" -k -s --header "${header}" --data-urlencode query='elasticsearch_cluster_health_status{color=~"yellow|red"} > 0')" - if [[ $(jq '.data.result | length' <<< "${res}" ) -gt 0 ]]; then - echo "Opensearch is in $(jq '.data.result[0].metric.color' <<< "${res}") state!" + token="$(fetch_oidc_token)" + domain="https://kube.$(yq4 '.global.opsDomain' "${config["config_file_sc"]}"):6443" + endpoint="${domain}/api/v1/namespaces/thanos/services/thanos-query-query-frontend:9090/proxy/api/v1" + header="Authorization: Bearer ${token}" + range_arg=("--data-urlencode" "start=$(date -d -"${1}" -Iseconds)" "--data-urlencode" "end=$(date -Iseconds)" "--data-urlencode" "step=1m") + + query_and_parse() { + query="${1}" + print_func="${2}" + res="$(curl "${endpoint}/query_range" -k -s --header "${header}" --data-urlencode query="${query}" "${range_arg[@]}")" + if [[ $(jq '.data.result | length' <<<"${res}") -gt 0 ]]; then + readarray metric_results_arr < <(jq -c '.data.result[]' <<<"${res}") + for row in "${metric_results_arr[@]}"; do + "${print_func}" "${row}" + done fi + } + + print_fluentd() { + echo "Fluentd output error rate over 0 on the dates:" + jq '.values[][0]' <<<"${1}" | xargs -I {} date -d@{} + echo + } + + # Fluentd output error/retry rate + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + echo "Querying fluentd output error rate." 
+ query_and_parse 'sum(rate(fluentd_output_status_retry_count[1m])) > 0' print_fluentd + + print_dropped_packages() { + direction="$([[ $(jq -r .metric.type <<<"${1}") == "fw" ]] && echo "from" || echo "to")" + pod="$(jq '.metric.exported_pod' <<<"${1}")" + echo "Found dropped packages going ${direction} pod: ${pod} on dates:" + jq '.values[][0]' <<<"${1}" | xargs -I {} date -d@{} + echo + } + + # Dropped packets going from pod + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + echo "Querying dropped packages." + query_and_parse 'rate(no_policy_drop_counter[1m]) > 0' print_dropped_packages + + print_uptime() { + echo "The target $(jq '.metric.target' <<<"${1}") was down on dates:" + jq '.values[][0]' <<<"${1}" | xargs -I {} date -d@{} + echo + } + + # Uptime + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + echo "Querying uptime." + query_and_parse 'max by (target) (probe_success) < 1' print_uptime + + # Opensearch status + printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' - + echo "Querying Opensearch cluster status" + res="$(curl "${endpoint}/query" -k -s --header "${header}" --data-urlencode query='elasticsearch_cluster_health_status{color=~"yellow|red"} > 0')" + if [[ $(jq '.data.result | length' <<<"${res}") -gt 0 ]]; then + echo "Opensearch is in $(jq '.data.result[0].metric.color' <<<"${res}") state!" 
+ fi } # run_diagnostics_query_metric run_diagnostics_query_metric() { - token=$(fetch_oidc_token) - domain="https://kube.$(yq4 '.global.opsDomain' "${config["config_file_sc"]}"):6443" - endpoint="${domain}/api/v1/namespaces/thanos/services/thanos-query-query-frontend:9090/proxy/api/v1" - header="Authorization: Bearer ${token}" + token=$(fetch_oidc_token) + domain="https://kube.$(yq4 '.global.opsDomain' "${config["config_file_sc"]}"):6443" + endpoint="${domain}/api/v1/namespaces/thanos/services/thanos-query-query-frontend:9090/proxy/api/v1" + header="Authorization: Bearer ${token}" - curl "${endpoint}/query" -k --header "${header}" --data-urlencode query="${1}" | jq + curl "${endpoint}/query" -k --header "${header}" --data-urlencode query="${1}" | jq } if [[ -z "${CK8S_PGP_FP:-}" ]]; then - import_gpg_file "${gpg_file}" + import_gpg_file "${gpg_file}" fi log_info "Using the following fingerprints:" log_info "${CK8S_PGP_FP}" @@ -418,26 +422,26 @@ log_info "Running diagnostics..." export CK8S_AUTO_APPROVE=true case "${sub_command}" in - namespace) - # check that namespace exists - "${here}/ops.bash" kubectl "${cluster}" get namespace "${command_arg}" >/dev/null - run_diagnostics_namespaced "${command_arg}" >"${file}" 2>&1 - ;; - query-default-metrics-since) - # Verify date argument - date -d -"${command_arg}" >/dev/null - run_diagnostics_default_metrics "${command_arg}" >"${file}" 2>&1 - ;; - query-metric) - run_diagnostics_query_metric "${command_arg}" >"${file}" 2>&1 - ;; - *) - run_diagnostics >"${file}" 2>&1 - ;; +namespace) + # check that namespace exists + "${here}/ops.bash" kubectl "${cluster}" get namespace "${command_arg}" >/dev/null + run_diagnostics_namespaced "${command_arg}" >"${file}" 2>&1 + ;; +query-default-metrics-since) + # Verify date argument + date -d -"${command_arg}" >/dev/null + run_diagnostics_default_metrics "${command_arg}" >"${file}" 2>&1 + ;; +query-metric) + run_diagnostics_query_metric "${command_arg}" >"${file}" 2>&1 + ;; +*) + 
run_diagnostics >"${file}" 2>&1 + ;; esac if [[ "${include_config}" == "true" ]]; then - get_config_files >>"${file}" 2>&1 + get_config_files >>"${file}" 2>&1 fi log_info "Diagnostics done. Saving and encrypting file ${file}" diff --git a/bin/explain.bash b/bin/explain.bash index 08a08bf69a..56071f03de 100755 --- a/bin/explain.bash +++ b/bin/explain.bash @@ -42,8 +42,8 @@ recvdereference() { local data data="$(dereference "$1")" - while [[ "$(hasreference - <<< "${data}")" == true ]]; do - data="$(dereference - <<< "${data}")" + while [[ "$(hasreference - <<<"${data}")" == true ]]; do + data="$(dereference - <<<"${data}")" done echo "${data}" @@ -58,22 +58,22 @@ explain() { if [[ -n "${target}" ]]; then local -a path - readarray -t path <<< "$(yq4 'split "." | .[]' <<< "${target}")" + readarray -t path <<<"$(yq4 'split "." | .[]' <<<"${target}")" for key in "${path[@]}"; do - case "$(yq4 ".type" <<< "${data}")" in + case "$(yq4 ".type" <<<"${data}")" in array) - data="$(yq4 ".items.properties.${key}" <<< "${data}")" + data="$(yq4 ".items.properties.${key}" <<<"${data}")" ;; object) if [[ "${key}" == "additionalProperties" ]]; then - data="$(yq4 ".additionalProperties" <<< "${data}")" + data="$(yq4 ".additionalProperties" <<<"${data}")" else - data="$(yq4 ".properties.${key}" <<< "${data}")" + data="$(yq4 ".properties.${key}" <<<"${data}")" fi ;; *) - log_error "unable to navigate to ${key} on path to ${target} found unnavigable type: $(yq4 ".type" <<< "${data}")" + log_error "unable to navigate to ${key} on path to ${target} found unnavigable type: $(yq4 ".type" <<<"${data}")" exit 1 ;; esac @@ -87,22 +87,22 @@ explain() { local title desc type subtype - title="$(yq4 ".title // \"${2:-root}\"" <<< "${data}")" - desc="$(yq4 '.description // "no description"' <<< "${data}")" - type="$(yq4 '.type' <<< "${data}")" + title="$(yq4 ".title // \"${2:-root}\"" <<<"${data}")" + desc="$(yq4 '.description // "no description"' <<<"${data}")" + type="$(yq4 '.type' <<<"${data}")" 
case "${type}" in array) - subtype="$(yq4 '.items.type // "any"' <<< "${data}")" + subtype="$(yq4 '.items.type // "any"' <<<"${data}")" ;; object) - if [[ "$(yq4 '.properties // {} | length == 0 and .additionalProperties != false' <<< "${data}")" == "true" ]]; then - subtype="$(yq4 '.additionalProperties.type // "any"' <<< "${data}")" + if [[ "$(yq4 '.properties // {} | length == 0 and .additionalProperties != false' <<<"${data}")" == "true" ]]; then + subtype="$(yq4 '.additionalProperties.type // "any"' <<<"${data}")" fi ;; esac - if [[ "$(yq4 '.enum // [] | length != 0' <<< "${data}")" == "true" ]]; then + if [[ "$(yq4 '.enum // [] | length != 0' <<<"${data}")" == "true" ]]; then subtype="${type}" type="enum" fi @@ -128,32 +128,32 @@ explain() { case "${type}" in array) - case "$(yq4 '.items.type' <<< "${data}")" in + case "$(yq4 '.items.type' <<<"${data}")" in object) echo - yq4 '{"properties": .items.properties | keys}' <<< "${data}" + yq4 '{"properties": .items.properties | keys}' <<<"${data}" ;; esac ;; enum) echo - yq4 '{"options": .enum}' <<< "${data}" + yq4 '{"options": .enum}' <<<"${data}" ;; object) - if [[ "$(yq4 '.properties // {} | length != 0' <<< "${data}")" == "true" ]]; then + if [[ "$(yq4 '.properties // {} | length != 0' <<<"${data}")" == "true" ]]; then echo - yq4 '{"properties": .properties | keys}' <<< "${data}" + yq4 '{"properties": .properties | keys}' <<<"${data}" fi ;; esac - if [[ "$(yq4 '.default == null' <<< "${data}")" == "false" ]]; then + if [[ "$(yq4 '.default == null' <<<"${data}")" == "false" ]]; then echo - yq4 '{"defaults": .default}' <<< "${data}" - elif [[ "$(yq4 '(.type == "array") and (.items.properties // {} | length == 0)' <<< "${data}")" == "true" ]]; then + yq4 '{"defaults": .default}' <<<"${data}" + elif [[ "$(yq4 '(.type == "array") and (.items.properties // {} | length == 0)' <<<"${data}")" == "true" ]]; then echo echo "This array lacks defaults" - elif [[ "$(yq4 '(.type == "object") and (.properties // {} | length 
== 0)' <<< "${data}")" == "true" ]]; then + elif [[ "$(yq4 '(.type == "object") and (.properties // {} | length == 0)' <<<"${data}")" == "true" ]]; then echo echo "This object lacks defaults" elif [[ "${type}" != "array" ]] && [[ "${type}" != "object" ]]; then @@ -161,27 +161,29 @@ explain() { echo "This ${type} lacks defaults" fi - if [[ "$(yq4 '.examples == null' <<< "${data}")" == "false" ]]; then + if [[ "$(yq4 '.examples == null' <<<"${data}")" == "false" ]]; then echo - yq4 '{"examples": .examples}' <<< "${data}" + yq4 '{"examples": .examples}' <<<"${data}" fi - if [[ "$(yq4 '(.type == "array") and (.items.examples // [] | length != 0)' <<< "${data}")" == "true" ]]; then + if [[ "$(yq4 '(.type == "array") and (.items.examples // [] | length != 0)' <<<"${data}")" == "true" ]]; then echo - yq4 '{"item examples": .items.examples}' <<< "${data}" + yq4 '{"item examples": .items.examples}' <<<"${data}" fi } - declare schema case "${1:-}" in config) - schema="${root}/config/schemas/config.yaml" ;; + schema="${root}/config/schemas/config.yaml" + ;; secrets) - schema="${root}/config/schemas/secrets.yaml" ;; + schema="${root}/config/schemas/secrets.yaml" + ;; *) - usage "${1:-}" ;; + usage "${1:-}" + ;; esac explain "${schema}" "${2:-}" diff --git a/bin/fix-psp-violations.bash b/bin/fix-psp-violations.bash index 08f9627253..cbe5ed4a3d 100755 --- a/bin/fix-psp-violations.bash +++ b/bin/fix-psp-violations.bash @@ -61,7 +61,7 @@ function is_customer_namespace() { namespace="$1" operator_ns_regex="^($(kubectl_do "${CK8S_CLUSTER}" get ns -l owner=operator '-ojsonpath={.items[*].metadata.name}' | sed 's/ /|/g'))$" - if [[ "$namespace" =~ $operator_ns_regex ]]; then return 1; fi + if [[ "$namespace" =~ $operator_ns_regex ]]; then return 1; fi } function restart_violating_resources() { diff --git a/bin/init.bash b/bin/init.bash index 1d9beca619..96caf9b75f 100755 --- a/bin/init.bash +++ b/bin/init.bash @@ -16,306 +16,306 @@ source "${here}/common.bash" # Load cloud provider, 
environment name, and flavor from config if available. if [ -f "${config[default_common]}" ]; then - cloud_provider=$(yq4 '.global.ck8sCloudProvider' "${config[default_common]}") - environment_name=$(yq4 '.global.ck8sEnvironmentName' "${config[default_common]}") - flavor=$(yq4 '.global.ck8sFlavor' "${config[default_common]}") - k8s_installer=$(yq4 '.global.ck8sK8sInstaller' "${config[default_common]}") + cloud_provider=$(yq4 '.global.ck8sCloudProvider' "${config[default_common]}") + environment_name=$(yq4 '.global.ck8sEnvironmentName' "${config[default_common]}") + flavor=$(yq4 '.global.ck8sFlavor' "${config[default_common]}") + k8s_installer=$(yq4 '.global.ck8sK8sInstaller' "${config[default_common]}") fi if [ -z "${cloud_provider:-}" ]; then - : "${CK8S_CLOUD_PROVIDER:?Missing CK8S_CLOUD_PROVIDER}" + : "${CK8S_CLOUD_PROVIDER:?Missing CK8S_CLOUD_PROVIDER}" elif [ -v CK8S_CLOUD_PROVIDER ] && [ "${CK8S_CLOUD_PROVIDER}" != "${cloud_provider}" ]; then - log_error "ERROR: Cloud provider mismatch, '${cloud_provider}' in config and '${CK8S_CLOUD_PROVIDER}' in env" - exit 1 + log_error "ERROR: Cloud provider mismatch, '${cloud_provider}' in config and '${CK8S_CLOUD_PROVIDER}' in env" + exit 1 else - export CK8S_CLOUD_PROVIDER="${cloud_provider}" + export CK8S_CLOUD_PROVIDER="${cloud_provider}" fi if [ -z "${environment_name:-}" ]; then - : "${CK8S_ENVIRONMENT_NAME:?Missing CK8S_ENVIRONMENT_NAME}" + : "${CK8S_ENVIRONMENT_NAME:?Missing CK8S_ENVIRONMENT_NAME}" elif [ -v CK8S_ENVIRONMENT_NAME ] && [ "${CK8S_ENVIRONMENT_NAME}" != "${environment_name}" ]; then - log_error "ERROR: Environment name mismatch, '${environment_name}' in config and '${CK8S_ENVIRONMENT_NAME}' in env" - exit 1 + log_error "ERROR: Environment name mismatch, '${environment_name}' in config and '${CK8S_ENVIRONMENT_NAME}' in env" + exit 1 else - export CK8S_ENVIRONMENT_NAME="${environment_name}" + export CK8S_ENVIRONMENT_NAME="${environment_name}" fi if [ -z "${flavor:-}" ]; then - : "${CK8S_FLAVOR:?Missing 
CK8S_FLAVOR}" + : "${CK8S_FLAVOR:?Missing CK8S_FLAVOR}" elif [ -v CK8S_FLAVOR ] && [ -n "${CK8S_FLAVOR}" ] && [ "${CK8S_FLAVOR}" != "${flavor}" ]; then - log_error "ERROR: Environment flavor mismatch, '${flavor}' in config and '${CK8S_FLAVOR}' in env" - exit 1 + log_error "ERROR: Environment flavor mismatch, '${flavor}' in config and '${CK8S_FLAVOR}' in env" + exit 1 else - export CK8S_FLAVOR="${flavor}" + export CK8S_FLAVOR="${flavor}" fi if [ -z "${k8s_installer:-}" ]; then - : "${CK8S_K8S_INSTALLER:?Missing CK8S_K8S_INSTALLER}" + : "${CK8S_K8S_INSTALLER:?Missing CK8S_K8S_INSTALLER}" elif [ -v CK8S_K8S_INSTALLER ] && [ "${CK8S_K8S_INSTALLER}" != "${k8s_installer}" ]; then - log_error "ERROR: Kubernetes installer mismatch, '${k8s_installer}' in config and '${CK8S_K8S_INSTALLER}' in env" - exit 1 + log_error "ERROR: Kubernetes installer mismatch, '${k8s_installer}' in config and '${CK8S_K8S_INSTALLER}' in env" + exit 1 else - export CK8S_K8S_INSTALLER="${k8s_installer}" + export CK8S_K8S_INSTALLER="${k8s_installer}" fi # Validate the cloud provider if ! array_contains "${CK8S_CLOUD_PROVIDER}" "${ck8s_cloud_providers[@]}"; then - log_error "ERROR: Unsupported cloud provider: ${CK8S_CLOUD_PROVIDER}" - log_error "Supported providers: ${ck8s_cloud_providers[*]}" - exit 1 + log_error "ERROR: Unsupported cloud provider: ${CK8S_CLOUD_PROVIDER}" + log_error "Supported providers: ${ck8s_cloud_providers[*]}" + exit 1 fi # Validate the flavor if ! array_contains "${CK8S_FLAVOR}" "${ck8s_flavors[@]}"; then - log_error "ERROR: Unsupported flavor: ${CK8S_FLAVOR}" - log_error "Supported flavors: ${ck8s_flavors[*]}" - exit 1 + log_error "ERROR: Unsupported flavor: ${CK8S_FLAVOR}" + log_error "Supported flavors: ${ck8s_flavors[*]}" + exit 1 fi # Validate the installer if ! 
array_contains "${CK8S_K8S_INSTALLER}" "${ck8s_k8s_installers[@]}"; then - log_error "ERROR: Unsupported kubernetes installer: ${CK8S_K8S_INSTALLER}" - log_error "Supported kubernetes installers: ${ck8s_k8s_installers[*]}" - exit 1 + log_error "ERROR: Unsupported kubernetes installer: ${CK8S_K8S_INSTALLER}" + log_error "Supported kubernetes installers: ${ck8s_k8s_installers[*]}" + exit 1 fi generate_sops_config() { - if [ -n "${CK8S_PGP_FP:-}" ]; then - if ! gpg --list-keys | grep "${CK8S_PGP_FP}" > /dev/null 2>&1; then - log_error "ERROR: Fingerprint does not exist in gpg keyring." - log_error "CK8S_PGP_FP=${CK8S_PGP_FP}" - exit 1 - fi - fingerprint="${CK8S_PGP_FP}" - elif [ -n "${CK8S_PGP_UID:-}" ]; then - fingerprint=$(gpg --list-keys --with-colons "${CK8S_PGP_UID}" | \ - awk -F: '$1 == "fpr" {print $10;}' | head -n 1 || \ - echo "") - if [ -z "${fingerprint}" ]; then - log_error "ERROR: Unable to get fingerprint from gpg keyring using UID." - log_error "CK8S_PGP_UID=${CK8S_PGP_UID}" - exit 1 - fi - else - log_error "ERROR: CK8S_PGP_FP and CK8S_PGP_UID can't both be unset" - exit 1 + if [ -n "${CK8S_PGP_FP:-}" ]; then + if ! gpg --list-keys | grep "${CK8S_PGP_FP}" >/dev/null 2>&1; then + log_error "ERROR: Fingerprint does not exist in gpg keyring." + log_error "CK8S_PGP_FP=${CK8S_PGP_FP}" + exit 1 fi + fingerprint="${CK8S_PGP_FP}" + elif [ -n "${CK8S_PGP_UID:-}" ]; then + fingerprint=$(gpg --list-keys --with-colons "${CK8S_PGP_UID}" | + awk -F: '$1 == "fpr" {print $10;}' | head -n 1 || + echo "") + if [ -z "${fingerprint}" ]; then + log_error "ERROR: Unable to get fingerprint from gpg keyring using UID." 
+ log_error "CK8S_PGP_UID=${CK8S_PGP_UID}" + exit 1 + fi + else + log_error "ERROR: CK8S_PGP_FP and CK8S_PGP_UID can't both be unset" + exit 1 + fi - log_info "Initializing SOPS config with PGP fingerprint: ${fingerprint}" + log_info "Initializing SOPS config with PGP fingerprint: ${fingerprint}" - sops_config_write_fingerprints "${fingerprint}" + sops_config_write_fingerprints "${fingerprint}" } # Usage: generate_default_config generate_default_config() { - if [[ $# -ne 1 ]]; then - log_error "ERROR: number of args in generate_default_config must be 1. #=[$#]" - exit 1 - fi + if [[ $# -ne 1 ]]; then + log_error "ERROR: number of args in generate_default_config must be 1. #=[$#]" + exit 1 + fi - default_config="${1}" - if [ -f "${default_config}" ]; then - backup_file "${default_config}" default - else - touch "${default_config}" - fi + default_config="${1}" + if [ -f "${default_config}" ]; then + backup_file "${default_config}" default + else + touch "${default_config}" + fi - config_name=$(echo "${default_config}" | sed -r 's/.*\///') + config_name=$(echo "${default_config}" | sed -r 's/.*\///') - new_config=$(mktemp) - append_trap "rm ${new_config}; chmod 444 ${default_config}" EXIT + new_config=$(mktemp) + append_trap "rm ${new_config}; chmod 444 ${default_config}" EXIT - local -a files - files=("${config_template_path}/${config_name}" "${config_template_path}/flavors/${CK8S_FLAVOR}/${config_name}") + local -a files + files=("${config_template_path}/${config_name}" "${config_template_path}/flavors/${CK8S_FLAVOR}/${config_name}") - if [[ -f "${config_template_path}/k8s-installers/${CK8S_K8S_INSTALLER}/${config_name}" ]]; then - files+=("${config_template_path}/k8s-installers/${CK8S_K8S_INSTALLER}/${config_name}") - fi - if [[ -f "${config_template_path}/providers/${CK8S_CLOUD_PROVIDER}/${config_name}" ]]; then - files+=("${config_template_path}/providers/${CK8S_CLOUD_PROVIDER}/${config_name}") - fi + if [[ -f 
"${config_template_path}/k8s-installers/${CK8S_K8S_INSTALLER}/${config_name}" ]]; then + files+=("${config_template_path}/k8s-installers/${CK8S_K8S_INSTALLER}/${config_name}") + fi + if [[ -f "${config_template_path}/providers/${CK8S_CLOUD_PROVIDER}/${config_name}" ]]; then + files+=("${config_template_path}/providers/${CK8S_CLOUD_PROVIDER}/${config_name}") + fi - yq_merge "${files[@]}" | envsubst > "${new_config}" + yq_merge "${files[@]}" | envsubst >"${new_config}" - chmod 644 "${default_config}" - cat "${new_config}" > "${default_config}" + chmod 644 "${default_config}" + cat "${new_config}" >"${default_config}" } # Usage: update_config # Updates configs to only contain custom values. update_config() { - if [[ $# -ne 1 ]]; then - log_error "ERROR: number of args in update_config must be 1. #=[$#]" - exit 1 - fi + if [[ $# -ne 1 ]]; then + log_error "ERROR: number of args in update_config must be 1. #=[$#]" + exit 1 + fi - override_config="${1}" - config_name=$(echo "${override_config}" | sed -r 's/.*\///' | sed -r 's/-config.yaml//') + override_config="${1}" + config_name=$(echo "${override_config}" | sed -r 's/.*\///' | sed -r 's/-config.yaml//') - if [ -f "${override_config}" ]; then - backup_file "${override_config}" - log_info "Updating ${config_name} config" - else - touch "${override_config}" - log_info "Creating ${config_name} config" - fi + if [ -f "${override_config}" ]; then + backup_file "${override_config}" + log_info "Updating ${config_name} config" + else + touch "${override_config}" + log_info "Creating ${config_name} config" + fi + + if [ "${config_name}" == "common" ]; then + default_config="${config[default_common]}" + base_config="${config[default_common]}" - if [ "${config_name}" == "common" ]; then - default_config="${config[default_common]}" - base_config="${config[default_common]}" - - if [[ -f "${config[override_sc]}" ]] && [[ -f "${config[override_wc]}" ]]; then - yq_copy_commons "${config[override_sc]}" "${config[override_wc]}" 
"${override_config}" - fi - else - default_config=$(mktemp) - append_trap "rm ${default_config}" EXIT - yq_merge "${config[default_common]}" "${CK8S_CONFIG_PATH}/defaults/${config_name}-config.yaml" > "${default_config}" - - base_config=$(mktemp) - append_trap "rm ${base_config}" EXIT - yq_merge "${default_config}" "${config[override_common]}" > "${base_config}" + if [[ -f "${config[override_sc]}" ]] && [[ -f "${config[override_wc]}" ]]; then + yq_copy_commons "${config[override_sc]}" "${config[override_wc]}" "${override_config}" fi + else + default_config=$(mktemp) + append_trap "rm ${default_config}" EXIT + yq_merge "${config[default_common]}" "${CK8S_CONFIG_PATH}/defaults/${config_name}-config.yaml" >"${default_config}" + + base_config=$(mktemp) + append_trap "rm ${base_config}" EXIT + yq_merge "${default_config}" "${config[override_common]}" >"${base_config}" + fi - new_config=$(mktemp) - append_trap "rm ${new_config}" EXIT - echo "{}" > "${new_config}" + new_config=$(mktemp) + append_trap "rm ${new_config}" EXIT + echo "{}" >"${new_config}" - yq_copy_changes "${base_config}" "${override_config}" "${new_config}" + yq_copy_changes "${base_config}" "${override_config}" "${new_config}" - if [ "${config_name}" == "common" ]; then - diff_config="${new_config}" - else - diff_config=$(mktemp) - append_trap "rm ${diff_config}" EXIT - yq_merge "${config[override_common]}" "${new_config}" > "${diff_config}" - fi + if [ "${config_name}" == "common" ]; then + diff_config="${new_config}" + else + diff_config=$(mktemp) + append_trap "rm ${diff_config}" EXIT + yq_merge "${config[override_common]}" "${new_config}" >"${diff_config}" + fi - yq_copy_values "${default_config}" "${diff_config}" "${new_config}" "set-me" + yq_copy_values "${default_config}" "${diff_config}" "${new_config}" "set-me" - if [ "${config_name}" == "common" ]; then - preamble="# Changes made here will override the default values for both the service and workload cluster." 
- else - preamble="# Changes made here will override the default values as well as the common config for this cluster." - fi - preamble="${preamble}\n# See the default configuration under \"defaults/\" to see available and suggested options." - echo -e "${preamble}" | cat - "${new_config}" > "${override_config}" + if [ "${config_name}" == "common" ]; then + preamble="# Changes made here will override the default values for both the service and workload cluster." + else + preamble="# Changes made here will override the default values as well as the common config for this cluster." + fi + preamble="${preamble}\n# See the default configuration under \"defaults/\" to see available and suggested options." + echo -e "${preamble}" | cat - "${new_config}" >"${override_config}" } # Usage: update_secrets update_secrets() { - if [[ $# -ne 2 ]]; then - log_error "ERROR: number of args in update_secrets must be 2. #=[$#]" - exit 1 - fi - file="${1}" - generate_new_secrets="${2}" + if [[ $# -ne 2 ]]; then + log_error "ERROR: number of args in update_secrets must be 2. #=[$#]" + exit 1 + fi + file="${1}" + generate_new_secrets="${2}" - tmpfile=$(mktemp) - append_trap "rm ${tmpfile}" EXIT + tmpfile=$(mktemp) + append_trap "rm ${tmpfile}" EXIT - yq4 eval-all 'select(fi == 0)' "${config_template_path}/secrets.yaml" > "${tmpfile}" + yq4 eval-all 'select(fi == 0)' "${config_template_path}/secrets.yaml" >"${tmpfile}" - template_file="${config_template_path}/providers/${CK8S_CLOUD_PROVIDER}/secrets.yaml" - if [[ -a "${template_file}" ]]; then - yq4 -i ". *= load(\"${template_file}\")" "${tmpfile}" - fi + template_file="${config_template_path}/providers/${CK8S_CLOUD_PROVIDER}/secrets.yaml" + if [[ -e "${template_file}" ]]; then + yq4 -i ". *= load(\"${template_file}\")" "${tmpfile}" + fi - generate_secrets "${tmpfile}" + generate_secrets "${tmpfile}" - if [[ -f "${file}" ]]; then - sops_decrypt "${file}" - yq4 --inplace '... 
comments=""' "${tmpfile}" - yq4 eval-all --inplace --prettyPrint 'select(fi == 0) * select(fi == 1)' "${tmpfile}" "${file}" - fi + if [[ -f "${file}" ]]; then + sops_decrypt "${file}" + yq4 --inplace '... comments=""' "${tmpfile}" + yq4 eval-all --inplace --prettyPrint 'select(fi == 0) * select(fi == 1)' "${tmpfile}" "${file}" + fi - if [ "${generate_new_secrets}" = "true" ]; then - generate_secrets "${tmpfile}" - fi + if [ "${generate_new_secrets}" = "true" ]; then + generate_secrets "${tmpfile}" + fi - cat "${tmpfile}" > "${file}" - sops_encrypt "${file}" + cat "${tmpfile}" >"${file}" + sops_encrypt "${file}" } # Usage: generate_secrets generate_secrets() { - if [[ $# -ne 1 ]]; then - log_error "ERROR: number of args in generate_secrets must be 1. #=[$#]" - exit 1 - fi - tmpfile="${1}" - - # https://unix.stackexchange.com/questions/307994/compute-bcrypt-hash-from-command-line - - OS_ADMIN_PASS=$(pwgen -cns 20 1) - OS_ADMIN_PASS_HASH=$(htpasswd -bnBC 10 "" "${OS_ADMIN_PASS}" | tr -d ':\n') - - OS_CONF_PASS=$(pwgen -cns 20 1) - OS_CONF_PASS_HASH=$(htpasswd -bnBC 10 "" "${OS_CONF_PASS}" | tr -d ':\n') - - OSD_PASS=$(pwgen -cns 20 1) - OSD_PASS_HASH=$(htpasswd -bnBC 10 "" "${OSD_PASS}" | tr -d ':\n') - - DEX_STATIC_PASS=$(pwgen -cns 20 1) - # shellcheck disable=SC2016 - DEX_STATIC_PASS_HASH=$(htpasswd -bnBC 10 "" "${DEX_STATIC_PASS}" | tr -d ':\n' | sed 's/$2y/$2a/') - - THANOS_INGRESS_PASS=$(pwgen -cns 20 1) - THANOS_INGRESS_PASS_HASH=$(htpasswd -bn "" "${THANOS_INGRESS_PASS}" | tr -d ':\n') - - HARBOR_REGISTRY_PASS=$(pwgen -cns 20 1) - HARBOR_REGISTRY_PASS_HTPASSWD=$(htpasswd -bnB "harbor_registry_user" "${HARBOR_REGISTRY_PASS}" | tr -d '\n') - - yq4 --inplace ".grafana.password= \"$(pwgen -cns 20 1)\"" "${tmpfile}" - yq4 --inplace ".grafana.clientSecret= \"$(pwgen -cns 20 1)\"" "${tmpfile}" - yq4 --inplace ".grafana.opsClientSecret= \"$(pwgen -cns 20 1)\"" "${tmpfile}" - yq4 --inplace ".harbor.password= \"$(pwgen -cns 20 1)\"" "${tmpfile}" - yq4 --inplace 
".harbor.registryPassword= \"${HARBOR_REGISTRY_PASS}\"" "${tmpfile}" - yq4 --inplace ".harbor.registryPasswordHtpasswd= \"${HARBOR_REGISTRY_PASS_HTPASSWD}\"" "${tmpfile}" - yq4 --inplace ".harbor.internal.databasePassword= \"$(pwgen -cns 20 1)\"" "${tmpfile}" - yq4 --inplace ".harbor.clientSecret= \"$(pwgen -cns 20 1)\"" "${tmpfile}" - yq4 --inplace ".harbor.xsrf= \"$(pwgen -cns 32 1)\"" "${tmpfile}" - yq4 --inplace ".harbor.coreSecret= \"$(pwgen -cns 20 1)\"" "${tmpfile}" - yq4 --inplace ".harbor.jobserviceSecret= \"$(pwgen -cns 20 1)\"" "${tmpfile}" - yq4 --inplace ".harbor.registrySecret= \"$(pwgen -cns 20 1)\"" "${tmpfile}" - yq4 --inplace ".opensearch.adminPassword= \"${OS_ADMIN_PASS}\"" "${tmpfile}" - yq4 --inplace ".opensearch.adminHash= \"${OS_ADMIN_PASS_HASH}\"" "${tmpfile}" - yq4 --inplace ".opensearch.clientSecret= \"$(pwgen -cns 20 1)\"" "${tmpfile}" - yq4 --inplace ".opensearch.configurerPassword= \"${OS_CONF_PASS}\"" "${tmpfile}" - yq4 --inplace ".opensearch.configurerHash= \"${OS_CONF_PASS_HASH}\"" "${tmpfile}" - yq4 --inplace ".opensearch.dashboardsPassword= \"${OSD_PASS}\"" "${tmpfile}" - yq4 --inplace ".opensearch.dashboardsHash= \"${OSD_PASS_HASH}\"" "${tmpfile}" - yq4 --inplace ".opensearch.dashboardsCookieEncKey= \"$(pwgen -cns 32 1)\"" "${tmpfile}" - yq4 --inplace ".opensearch.fluentdPassword= \"$(pwgen -cns 20 1)\"" "${tmpfile}" - yq4 --inplace ".opensearch.curatorPassword= \"$(pwgen -cns 20 1)\"" "${tmpfile}" - yq4 --inplace ".opensearch.snapshotterPassword= \"$(pwgen -cns 20 1)\"" "${tmpfile}" - yq4 --inplace ".opensearch.metricsExporterPassword= \"$(pwgen -cns 20 1)\"" "${tmpfile}" - yq4 --inplace ".kubeapiMetricsPassword= \"$(pwgen -cns 20 1)\"" "${tmpfile}" - yq4 --inplace ".dex.staticPasswordNotHashed= \"${DEX_STATIC_PASS}\"" "${tmpfile}" - yq4 --inplace ".dex.staticPassword= \"${DEX_STATIC_PASS_HASH}\"" "${tmpfile}" - yq4 --inplace ".dex.kubeloginClientSecret= \"$(pwgen -cns 20 1)\"" "${tmpfile}" - yq4 --inplace ".user.grafanaPassword= 
\"$(pwgen -cns 20 1)\"" "${tmpfile}" - yq4 --inplace ".user.alertmanagerPassword= \"$(pwgen -cns 20 1)\"" "${tmpfile}" - yq4 --inplace ".thanos.receiver.basic_auth.password= \"${THANOS_INGRESS_PASS}\"" "${tmpfile}" - yq4 --inplace ".thanos.receiver.basic_auth.passwordHash= \"${THANOS_INGRESS_PASS_HASH}\"" "${tmpfile}" + if [[ $# -ne 1 ]]; then + log_error "ERROR: number of args in generate_secrets must be 1. #=[$#]" + exit 1 + fi + tmpfile="${1}" + + # https://unix.stackexchange.com/questions/307994/compute-bcrypt-hash-from-command-line + + OS_ADMIN_PASS=$(pwgen -cns 20 1) + OS_ADMIN_PASS_HASH=$(htpasswd -bnBC 10 "" "${OS_ADMIN_PASS}" | tr -d ':\n') + + OS_CONF_PASS=$(pwgen -cns 20 1) + OS_CONF_PASS_HASH=$(htpasswd -bnBC 10 "" "${OS_CONF_PASS}" | tr -d ':\n') + + OSD_PASS=$(pwgen -cns 20 1) + OSD_PASS_HASH=$(htpasswd -bnBC 10 "" "${OSD_PASS}" | tr -d ':\n') + + DEX_STATIC_PASS=$(pwgen -cns 20 1) + # shellcheck disable=SC2016 + DEX_STATIC_PASS_HASH=$(htpasswd -bnBC 10 "" "${DEX_STATIC_PASS}" | tr -d ':\n' | sed 's/$2y/$2a/') + + THANOS_INGRESS_PASS=$(pwgen -cns 20 1) + THANOS_INGRESS_PASS_HASH=$(htpasswd -bn "" "${THANOS_INGRESS_PASS}" | tr -d ':\n') + + HARBOR_REGISTRY_PASS=$(pwgen -cns 20 1) + HARBOR_REGISTRY_PASS_HTPASSWD=$(htpasswd -bnB "harbor_registry_user" "${HARBOR_REGISTRY_PASS}" | tr -d '\n') + + yq4 --inplace ".grafana.password= \"$(pwgen -cns 20 1)\"" "${tmpfile}" + yq4 --inplace ".grafana.clientSecret= \"$(pwgen -cns 20 1)\"" "${tmpfile}" + yq4 --inplace ".grafana.opsClientSecret= \"$(pwgen -cns 20 1)\"" "${tmpfile}" + yq4 --inplace ".harbor.password= \"$(pwgen -cns 20 1)\"" "${tmpfile}" + yq4 --inplace ".harbor.registryPassword= \"${HARBOR_REGISTRY_PASS}\"" "${tmpfile}" + yq4 --inplace ".harbor.registryPasswordHtpasswd= \"${HARBOR_REGISTRY_PASS_HTPASSWD}\"" "${tmpfile}" + yq4 --inplace ".harbor.internal.databasePassword= \"$(pwgen -cns 20 1)\"" "${tmpfile}" + yq4 --inplace ".harbor.clientSecret= \"$(pwgen -cns 20 1)\"" "${tmpfile}" + yq4 --inplace 
".harbor.xsrf= \"$(pwgen -cns 32 1)\"" "${tmpfile}" + yq4 --inplace ".harbor.coreSecret= \"$(pwgen -cns 20 1)\"" "${tmpfile}" + yq4 --inplace ".harbor.jobserviceSecret= \"$(pwgen -cns 20 1)\"" "${tmpfile}" + yq4 --inplace ".harbor.registrySecret= \"$(pwgen -cns 20 1)\"" "${tmpfile}" + yq4 --inplace ".opensearch.adminPassword= \"${OS_ADMIN_PASS}\"" "${tmpfile}" + yq4 --inplace ".opensearch.adminHash= \"${OS_ADMIN_PASS_HASH}\"" "${tmpfile}" + yq4 --inplace ".opensearch.clientSecret= \"$(pwgen -cns 20 1)\"" "${tmpfile}" + yq4 --inplace ".opensearch.configurerPassword= \"${OS_CONF_PASS}\"" "${tmpfile}" + yq4 --inplace ".opensearch.configurerHash= \"${OS_CONF_PASS_HASH}\"" "${tmpfile}" + yq4 --inplace ".opensearch.dashboardsPassword= \"${OSD_PASS}\"" "${tmpfile}" + yq4 --inplace ".opensearch.dashboardsHash= \"${OSD_PASS_HASH}\"" "${tmpfile}" + yq4 --inplace ".opensearch.dashboardsCookieEncKey= \"$(pwgen -cns 32 1)\"" "${tmpfile}" + yq4 --inplace ".opensearch.fluentdPassword= \"$(pwgen -cns 20 1)\"" "${tmpfile}" + yq4 --inplace ".opensearch.curatorPassword= \"$(pwgen -cns 20 1)\"" "${tmpfile}" + yq4 --inplace ".opensearch.snapshotterPassword= \"$(pwgen -cns 20 1)\"" "${tmpfile}" + yq4 --inplace ".opensearch.metricsExporterPassword= \"$(pwgen -cns 20 1)\"" "${tmpfile}" + yq4 --inplace ".kubeapiMetricsPassword= \"$(pwgen -cns 20 1)\"" "${tmpfile}" + yq4 --inplace ".dex.staticPasswordNotHashed= \"${DEX_STATIC_PASS}\"" "${tmpfile}" + yq4 --inplace ".dex.staticPassword= \"${DEX_STATIC_PASS_HASH}\"" "${tmpfile}" + yq4 --inplace ".dex.kubeloginClientSecret= \"$(pwgen -cns 20 1)\"" "${tmpfile}" + yq4 --inplace ".user.grafanaPassword= \"$(pwgen -cns 20 1)\"" "${tmpfile}" + yq4 --inplace ".user.alertmanagerPassword= \"$(pwgen -cns 20 1)\"" "${tmpfile}" + yq4 --inplace ".thanos.receiver.basic_auth.password= \"${THANOS_INGRESS_PASS}\"" "${tmpfile}" + yq4 --inplace ".thanos.receiver.basic_auth.passwordHash= \"${THANOS_INGRESS_PASS_HASH}\"" "${tmpfile}" } # Usage: backup_file [suffix] 
backup_file() { - file="${1}" - if [ ! -f "${file}" ]; then - log_error "ERROR: args in backup_file must be a file. [${file}]" - fi + file="${1}" + if [ ! -f "${file}" ]; then + log_error "ERROR: args in backup_file must be a file. [${file}]" + fi - if [ ! -d "${backup_config_path}" ]; then - mkdir -p "${backup_config_path}" - fi + if [ ! -d "${backup_config_path}" ]; then + mkdir -p "${backup_config_path}" + fi - if [ ${#} -gt 1 ]; then - backup_name=$(echo "${file}" | sed "s/.*\///" | sed "s/-config.yaml/-$2-$(date +%y%m%d%H%M%S).yaml/") - else - backup_name=$(echo "${file}" | sed "s/.*\///" | sed "s/.yaml/-$(date +%y%m%d%H%M%S).yaml/") - fi + if [ ${#} -gt 1 ]; then + backup_name=$(echo "${file}" | sed "s/.*\///" | sed "s/-config.yaml/-$2-$(date +%y%m%d%H%M%S).yaml/") + else + backup_name=$(echo "${file}" | sed "s/.*\///" | sed "s/.yaml/-$(date +%y%m%d%H%M%S).yaml/") + fi - log_info "Creating backup ${backup_config_path}/${backup_name}" + log_info "Creating backup ${backup_config_path}/${backup_name}" - cp "${file}" "${backup_config_path}/${backup_name}" + cp "${file}" "${backup_config_path}/${backup_name}" } backup_retention() { @@ -347,14 +347,14 @@ backup_retention() { if "${CK8S_AUTO_APPROVE}"; then log_warning "Removing backups older than ${CK8S_INIT_BACKUP_DAYS:-30} days:" - yq4 -M 'split(" ") | sort' <<< "${backups[@]:-}" + yq4 -M 'split(" ") | sort' <<<"${backups[@]:-}" # Needs to be with force else it'll stop on read-only files rm -f "${backups[@]:-}" elif [[ -t 1 ]]; then log_warning "Backups older than ${CK8S_INIT_BACKUP_DAYS:-30} days:" - yq4 -M 'split(" ") | sort' <<< "${backups[@]:-}" + yq4 -M 'split(" ") | sort' <<<"${backups[@]:-}" log_warning_no_newline "Do you want to remove them? 
(Y/n): " 1>&2 read -r reply @@ -371,10 +371,10 @@ backup_retention() { log_info "Initializing CK8S configuration for $CK8S_ENVIRONMENT_NAME with $CK8S_CLOUD_PROVIDER:$CK8S_K8S_INSTALLER:$CK8S_FLAVOR" if [ -f "${sops_config}" ]; then - log_info "SOPS config already exists: ${sops_config}" - validate_sops_config + log_info "SOPS config already exists: ${sops_config}" + validate_sops_config else - generate_sops_config + generate_sops_config fi mkdir -p "${state_path}" @@ -388,29 +388,29 @@ CK8S_VERSION=$(version_get) export CK8S_VERSION generate_default_config "${config[default_common]}" -update_config "${config[override_common]}" +update_config "${config[override_common]}" if [[ "${CK8S_CLUSTER:-}" =~ ^(sc|both)$ ]]; then - generate_default_config "${config[default_sc]}" - update_config "${config[override_sc]}" + generate_default_config "${config[default_sc]}" + update_config "${config[override_sc]}" fi if [[ "${CK8S_CLUSTER:-}" =~ ^(wc|both)$ ]]; then generate_default_config "${config[default_wc]}" - update_config "${config[override_wc]}" + update_config "${config[override_wc]}" fi gen_new_secrets=true if [ -f "${secrets[secrets_file]}" ]; then - backup_file "${secrets[secrets_file]}" - if [ ${#} -gt 0 ] && [ "${1}" = "--generate-new-secrets" ]; then - log_info "Updating and generating new secrets" - else - log_info "Updating secrets" - gen_new_secrets=false - fi + backup_file "${secrets[secrets_file]}" + if [ ${#} -gt 0 ] && [ "${1}" = "--generate-new-secrets" ]; then + log_info "Updating and generating new secrets" + else + log_info "Updating secrets" + gen_new_secrets=false + fi else - log_info "Generating new secrets" + log_info "Generating new secrets" fi update_secrets "${secrets[secrets_file]}" "${gen_new_secrets}" diff --git a/bin/kubeconfig.bash b/bin/kubeconfig.bash index d840098253..27640cab12 100755 --- a/bin/kubeconfig.bash +++ b/bin/kubeconfig.bash @@ -7,47 +7,47 @@ here="$(dirname "$(readlink -f "$0")")" source "${here}/common.bash" usage() { - echo 
"Usage: kubeconfig | admin [cluster_name]>" >&2 - exit 1 + echo "Usage: kubeconfig | admin [cluster_name]>" >&2 + exit 1 } [ -z "${1:-}" ] && usage get_user_server() { - ( - with_kubeconfig "${kubeconfig}" \ - kubectl config view -o jsonpath="{.clusters[0].cluster.server}" - ) + ( + with_kubeconfig "${kubeconfig}" \ + kubectl config view -o jsonpath="{.clusters[0].cluster.server}" + ) } set_cluster() { - user_kubeconfig=$1 - - user_server=$(get_user_server) - user_certificate_authority=/tmp/user-authority.pem - append_trap "rm ${user_certificate_authority}" EXIT - ( - with_kubeconfig "${kubeconfig}" \ - kubectl config view --raw \ - -o jsonpath="{.clusters[0].cluster.certificate-authority-data}" \ - | base64 --decode > ${user_certificate_authority} - ) - - kubectl --kubeconfig="${user_kubeconfig}" config set-cluster "${cluster_name}" \ + user_kubeconfig=$1 + + user_server=$(get_user_server) + user_certificate_authority=/tmp/user-authority.pem + append_trap "rm ${user_certificate_authority}" EXIT + ( + with_kubeconfig "${kubeconfig}" \ + kubectl config view --raw \ + -o jsonpath="{.clusters[0].cluster.certificate-authority-data}" | + base64 --decode >${user_certificate_authority} + ) + + kubectl --kubeconfig="${user_kubeconfig}" config set-cluster "${cluster_name}" \ --server="${user_server}" \ --certificate-authority="${user_certificate_authority}" --embed-certs=true } set_dex_credentials() { - user_kubeconfig=$1 - name=$2 - cluster_name=$3 + user_kubeconfig=$1 + name=$2 + cluster_name=$3 - config_load sc - cluster_config="${config[config_file_sc]}" - base_domain=$(yq4 '.global.baseDomain' "${cluster_config}") + config_load sc + cluster_config="${config[config_file_sc]}" + base_domain=$(yq4 '.global.baseDomain' "${cluster_config}") - kubectl --kubeconfig="${user_kubeconfig}" config set-credentials "${name}@${cluster_name}" \ + kubectl --kubeconfig="${user_kubeconfig}" config set-credentials "${name}@${cluster_name}" \ --exec-command=kubectl \ 
--exec-api-version=client.authentication.k8s.io/v1beta1 \ --exec-arg=oidc-login \ @@ -60,90 +60,90 @@ set_dex_credentials() { } set_context() { - user_kubeconfig=$1 - cluster_name=$2 - context_name=$3 - user_name=$4 - context_namespace=$5 + user_kubeconfig=$1 + cluster_name=$2 + context_name=$3 + user_name=$4 + context_namespace=$5 - kubectl --kubeconfig="${user_kubeconfig}" config set-context \ + kubectl --kubeconfig="${user_kubeconfig}" config set-context \ "${context_name}" \ --user "${user_name}@${cluster_name}" --cluster="${cluster_name}" --namespace="${context_namespace}" } use_context() { - user_kubeconfig=$1 - cluster_name=$2 + user_kubeconfig=$1 + cluster_name=$2 - kubectl --kubeconfig="${user_kubeconfig}" config use-context \ + kubectl --kubeconfig="${user_kubeconfig}" config use-context \ "${cluster_name}" } case "${1}" in - user) - config_load wc - cluster_config="${config[config_file_wc]}" - kubeconfig="${config[kube_config_wc]}" - user_kubeconfig=${CK8S_CONFIG_PATH}/user/secret/kubeconfig.yaml - ;; - dev) - serviceAccount="${2:-}" - if [ -z "${serviceAccount}" ]; then - echo "Error: Service account name is needed" >&2 - usage - fi +user) + config_load wc + cluster_config="${config[config_file_wc]}" + kubeconfig="${config[kube_config_wc]}" + user_kubeconfig=${CK8S_CONFIG_PATH}/user/secret/kubeconfig.yaml + ;; +dev) + serviceAccount="${2:-}" + if [ -z "${serviceAccount}" ]; then + echo "Error: Service account name is needed" >&2 + usage + fi - config_load wc - cluster_config="${config[config_file_wc]}" - kubeconfig="${config[kube_config_wc]}" + config_load wc + cluster_config="${config[config_file_wc]}" + kubeconfig="${config[kube_config_wc]}" - if [[ ! $(with_kubeconfig "${kubeconfig}" kubectl get serviceaccount "${serviceAccount}" 2>/dev/null) ]]; then - log_error "Service account ${serviceAccount} not found" - log_error " Add service account ${serviceAccount} in your wc-config" - log_error " Then apply app=user-rbac" - exit - fi + if [[ ! 
$(with_kubeconfig "${kubeconfig}" kubectl get serviceaccount "${serviceAccount}" 2>/dev/null) ]]; then + log_error "Service account ${serviceAccount} not found" + log_error " Add service account ${serviceAccount} in your wc-config" + log_error " Then apply app=user-rbac" + exit + fi - log_info "Adding dev ${serviceAccount} context to wc-config" + log_info "Adding dev ${serviceAccount} context to wc-config" - token=$(with_kubeconfig "${kubeconfig}" kubectl get secrets secret-"${serviceAccount}" -ojsonpath="{.data.token}" | base64 -d) - cluster_name=$(yq4 '.global.clusterName' "${cluster_config}") + token=$(with_kubeconfig "${kubeconfig}" kubectl get secrets secret-"${serviceAccount}" -ojsonpath="{.data.token}" | base64 -d) + cluster_name=$(yq4 '.global.clusterName' "${cluster_config}") - kubectl --kubeconfig="${kubeconfig}" config set-credentials "${serviceAccount}@${cluster_name}" \ - --token="${token}" + kubectl --kubeconfig="${kubeconfig}" config set-credentials "${serviceAccount}@${cluster_name}" \ + --token="${token}" - set_context "${kubeconfig}" "${cluster_name}" "${serviceAccount}" "${serviceAccount}" "default" + set_context "${kubeconfig}" "${cluster_name}" "${serviceAccount}" "${serviceAccount}" "default" - log_info "Dev context finished" - exit + log_info "Dev context finished" + exit + ;; +admin) + case "${2:-}" in + sc) + config_load sc + cluster_config="${config[config_file_sc]}" + kubeconfig="${config[kube_config_sc]}" ;; - admin) - case "${2:-}" in - sc) - config_load sc - cluster_config="${config[config_file_sc]}" - kubeconfig="${config[kube_config_sc]}" - ;; - wc) - config_load wc - cluster_config="${config[config_file_wc]}" - kubeconfig="${config[kube_config_wc]}" - ;; - *) usage ;; - esac - cluster="$2" - if [[ $# -gt 2 ]]; then - kubeconfig="${state_path}/kube_config_$3.yaml" - cluster="$3" - fi - user_kubeconfig=${CK8S_CONFIG_PATH}/.state/admin-kubeconfig-${cluster}.yaml + wc) + config_load wc + cluster_config="${config[config_file_wc]}" + 
kubeconfig="${config[kube_config_wc]}" ;; - *) usage ;; + *) usage ;; + esac + cluster="$2" + if [[ $# -gt 2 ]]; then + kubeconfig="${state_path}/kube_config_$3.yaml" + cluster="$3" + fi + user_kubeconfig=${CK8S_CONFIG_PATH}/.state/admin-kubeconfig-${cluster}.yaml + ;; +*) usage ;; esac if [[ ! -f "${kubeconfig}" ]]; then - log_error "${kubeconfig} not found" - usage + log_error "${kubeconfig} not found" + usage fi log_info "Creating kubeconfig for the ${1}" @@ -156,9 +156,9 @@ set_dex_credentials "${user_kubeconfig}" "${1}" "${cluster_name}" # Create context with relevant namespace # Pick the first namespace if [[ ${1} == "user" ]]; then - context_namespace=$(yq4 '.user.namespaces[0]' "${config[config_file_wc]}") + context_namespace=$(yq4 '.user.namespaces[0]' "${config[config_file_wc]}") else - context_namespace="default" + context_namespace="default" fi set_context "${user_kubeconfig}" "${cluster_name}" "${cluster_name}" "${1}" "${context_namespace}" diff --git a/bin/ops.bash b/bin/ops.bash index 16380cad11..424a07dc14 100755 --- a/bin/ops.bash +++ b/bin/ops.bash @@ -10,99 +10,99 @@ here="$(dirname "$(readlink -f "$0")")" source "${here}/common.bash" usage() { - echo "Usage: kubectl ..." >&2 - echo " kubecolor ..." >&2 - echo " helm ..." >&2 - echo " helmfile ..." >&2 - exit 1 + echo "Usage: kubectl ..." >&2 + echo " kubecolor ..." >&2 + echo " helm ..." >&2 + echo " helmfile ..." >&2 + exit 1 } # Run arbitrary kubecolor commands as cluster admin. ops_kubecolor() { - case "${1}" in - sc) kubeconfig="${config[kube_config_sc]}" ;; - wc) kubeconfig="${config[kube_config_wc]}" ;; - *) usage ;; - esac - shift - with_kubeconfig "${kubeconfig}" kubecolor "${@}" + case "${1}" in + sc) kubeconfig="${config[kube_config_sc]}" ;; + wc) kubeconfig="${config[kube_config_wc]}" ;; + *) usage ;; + esac + shift + with_kubeconfig "${kubeconfig}" kubecolor "${@}" } # Run arbitrary kubectl commands as cluster admin. 
ops_kubectl() { - case "${1}" in - sc) kubeconfig="${config[kube_config_sc]}" ;; - wc) kubeconfig="${config[kube_config_wc]}" ;; - *) usage ;; - esac - shift - with_kubeconfig "${kubeconfig}" kubectl "${@}" + case "${1}" in + sc) kubeconfig="${config[kube_config_sc]}" ;; + wc) kubeconfig="${config[kube_config_wc]}" ;; + *) usage ;; + esac + shift + with_kubeconfig "${kubeconfig}" kubectl "${@}" } # Run arbitrary helm commands as cluster admin. ops_helm() { - case "${1}" in - sc) kubeconfig="${config[kube_config_sc]}" ;; - wc) kubeconfig="${config[kube_config_wc]}" ;; - *) usage ;; - esac - shift - with_kubeconfig "${kubeconfig}" helm "${@}" + case "${1}" in + sc) kubeconfig="${config[kube_config_sc]}" ;; + wc) kubeconfig="${config[kube_config_wc]}" ;; + *) usage ;; + esac + shift + with_kubeconfig "${kubeconfig}" helm "${@}" } # Run arbitrary Helmfile commands as cluster admin. ops_helmfile() { - config_load "$1" + config_load "$1" - case "${1}" in - sc) - cluster="service_cluster" - kubeconfig="${config[kube_config_sc]}" - ;; - wc) - cluster="workload_cluster" - kubeconfig="${config[kube_config_wc]}" - ;; - *) usage ;; - esac + case "${1}" in + sc) + cluster="service_cluster" + kubeconfig="${config[kube_config_sc]}" + ;; + wc) + cluster="workload_cluster" + kubeconfig="${config[kube_config_wc]}" + ;; + *) usage ;; + esac - shift + shift - with_kubeconfig "${kubeconfig}" \ - helmfile -f "${here}/../helmfile.d/" -e ${cluster} "${@}" + with_kubeconfig "${kubeconfig}" \ + helmfile -f "${here}/../helmfile.d/" -e ${cluster} "${@}" } # Run arbitrary Velero commands as cluster admin. 
ops_velero() { - case "${1}" in - sc) kubeconfig="${config[kube_config_sc]}" ;; - wc) kubeconfig="${config[kube_config_wc]}" ;; - *) usage ;; - esac - shift - with_kubeconfig "${kubeconfig}" velero "${@}" + case "${1}" in + sc) kubeconfig="${config[kube_config_sc]}" ;; + wc) kubeconfig="${config[kube_config_wc]}" ;; + *) usage ;; + esac + shift + with_kubeconfig "${kubeconfig}" velero "${@}" } case "${1}" in - kubectl) - shift - ops_kubectl "${@}" - ;; - kubecolor) - shift - ops_kubecolor "${@}" - ;; - helm) - shift - ops_helm "${@}" - ;; - helmfile) - shift - ops_helmfile "${@}" - ;; - velero) - shift - ops_velero "${@}" - ;; - *) usage ;; +kubectl) + shift + ops_kubectl "${@}" + ;; +kubecolor) + shift + ops_kubecolor "${@}" + ;; +helm) + shift + ops_helm "${@}" + ;; +helmfile) + shift + ops_helmfile "${@}" + ;; +velero) + shift + ops_velero "${@}" + ;; +*) usage ;; esac diff --git a/bin/team.bash b/bin/team.bash index e35276f1ef..ad3ac8eca9 100755 --- a/bin/team.bash +++ b/bin/team.bash @@ -12,94 +12,98 @@ declare -a split_fingerprints # Join a list. # join_by - a b c # a-b-c -join_by() { local IFS="${1}"; shift; echo "${*}"; } +join_by() { + local IFS="${1}" + shift + echo "${*}" +} # Load fingerprints from the SOPS config file into `split_fingerprints`. sops_load_fingerprints() { - fingerprints=$(yq4 'select(documentIndex == 0) | .creation_rules[0].pgp' < "$sops_config") - IFS=',' read -r -a split_fingerprints <<< "${fingerprints}" + fingerprints=$(yq4 'select(documentIndex == 0) | .creation_rules[0].pgp' <"$sops_config") + IFS=',' read -r -a split_fingerprints <<<"${fingerprints}" } # Write the fingerprints in `split_fingerprints` in the SOPS config file. 
sops_save_fingerprints() { - fingerprints="$(join_by , "${split_fingerprints[@]}")" - sops_config_write_fingerprints "${fingerprints}" + fingerprints="$(join_by , "${split_fingerprints[@]}")" + sops_config_write_fingerprints "${fingerprints}" } # Add a PGP fingerprint to the SOPS config file if it doesn't already exist. sops_add_pgp() { - sops_load_fingerprints + sops_load_fingerprints - for fingerprint in "${split_fingerprints[@]}"; do - if [ "${1}" = "${fingerprint}" ]; then - log_error "PGP fingerprint already in sops config: ${sops_config}" - exit 1 - fi - done + for fingerprint in "${split_fingerprints[@]}"; do + if [ "${1}" = "${fingerprint}" ]; then + log_error "PGP fingerprint already in sops config: ${sops_config}" + exit 1 + fi + done - split_fingerprints+=("${1}") + split_fingerprints+=("${1}") - log_info "Adding PGP key: ${1}" + log_info "Adding PGP key: ${1}" - sops_save_fingerprints + sops_save_fingerprints } # Remove a PGP fingerprint from the SOPS config file if it exists. sops_remove_pgp() { - sops_load_fingerprints - - found=false - for i in "${!split_fingerprints[@]}"; do - if [ "${1}" = "${split_fingerprints[i]}" ]; then - unset 'split_fingerprints[i]' - found=true - break - fi - done - if [ "${found}" != "true" ]; then - log_error "PGP fingerprint not found in sops config: ${sops_config}" - exit 1 + sops_load_fingerprints + + found=false + for i in "${!split_fingerprints[@]}"; do + if [ "${1}" = "${split_fingerprints[i]}" ]; then + unset 'split_fingerprints[i]' + found=true + break fi + done + if [ "${found}" != "true" ]; then + log_error "PGP fingerprint not found in sops config: ${sops_config}" + exit 1 + fi - if [ "${#split_fingerprints[@]}" -eq 0 ]; then - log_error "Refusing to remove the only remaining PGP key." - exit - fi + if [ "${#split_fingerprints[@]}" -eq 0 ]; then + log_error "Refusing to remove the only remaining PGP key." 
+ exit + fi - log_info "Removing PGP key: ${1}" + log_info "Removing PGP key: ${1}" - sops_save_fingerprints + sops_save_fingerprints } # Update all secrets with the public keys from the fingerprints in the SOPS # config file. sops_update_keys() { - for secret in "${secrets[@]}"; do - if [ ! -f "${secret}" ]; then - log_warning "Secret does not exist: ${secret}" - continue - fi - - log_info "Updating keys in: ${secret}" - - # sops updatekeys does not take the --config flag, need to change cwd. - pushd "${CK8S_CONFIG_PATH}" > /dev/null - sops updatekeys --yes "${secret}" - popd > /dev/null - done + for secret in "${secrets[@]}"; do + if [ ! -f "${secret}" ]; then + log_warning "Secret does not exist: ${secret}" + continue + fi + + log_info "Updating keys in: ${secret}" + + # sops updatekeys does not take the --config flag, need to change cwd. + pushd "${CK8S_CONFIG_PATH}" >/dev/null + sops updatekeys --yes "${secret}" + popd >/dev/null + done } # Rotate the data key in all secrets. sops_rotate_data_key() { - for secret in "${secrets[@]}"; do - if [ ! -f "${secret}" ]; then - log_warning "Secret does not exist: ${secret}" - continue - fi - - log_info "Rotating data key and reencrypting: ${secret}" - sops --config "${sops_config}" -r -i "${secret}" - done + for secret in "${secrets[@]}"; do + if [ ! -f "${secret}" ]; then + log_warning "Secret does not exist: ${secret}" + continue + fi + + log_info "Rotating data key and reencrypting: ${secret}" + sops --config "${sops_config}" -r -i "${secret}" + done } # Add a fingerprint to the SOPS config file if it doesn't already exist and @@ -108,11 +112,11 @@ sops_rotate_data_key() { # 1. Edit .sops.yaml and append fingerprint to PGP creation_rule. # 2. Run `sops updatekeys --yes [file]` on all secrets files. 
add_pgp() { - fingerprint="${1}" + fingerprint="${1}" - sops_add_pgp "${fingerprint}" + sops_add_pgp "${fingerprint}" - sops_update_keys + sops_update_keys } # Remove a fingerprint from the SOPS config file, update all the secrets and @@ -123,21 +127,21 @@ add_pgp() { # 2. Run `sops updatekeys --yes [file]` on all secrets files. # 3. Run `sops -r -i [file]` on all secrets files. remove_pgp() { - fingerprint="${1}" + fingerprint="${1}" - sops_remove_pgp "${fingerprint}" + sops_remove_pgp "${fingerprint}" - sops_update_keys + sops_update_keys - sops_rotate_data_key + sops_rotate_data_key } case "${1}" in - "add-pgp") add_pgp "${2}" ;; - "remove-pgp") remove_pgp "${2}" ;; - *) - log_error "ERROR: ${1} is not a valid argument" - log_error "Usage: ${0} " - exit 1 - ;; +"add-pgp") add_pgp "${2}" ;; +"remove-pgp") remove_pgp "${2}" ;; +*) + log_error "ERROR: ${1} is not a valid argument" + log_error "Usage: ${0} " + exit 1 + ;; esac diff --git a/bin/test.bash b/bin/test.bash index 2d568b96da..764b7ef469 100755 --- a/bin/test.bash +++ b/bin/test.bash @@ -22,146 +22,146 @@ source "${pipeline_path}/test/services/workload-cluster/testIngress.sh" source "${pipeline_path}/test/services/workload-cluster/testHNC.sh" test_apps_sc() { - log_info "Testing service cluster" + log_info "Testing service cluster" - "${pipeline_path}/test/services/test-sc.sh" "${config[config_file_sc]}" "${@}" + "${pipeline_path}/test/services/test-sc.sh" "${config[config_file_sc]}" "${@}" } test_apps_wc() { - log_info "Testing workload cluster" + log_info "Testing workload cluster" - "${pipeline_path}/test/services/test-wc.sh" "${config[config_file_wc]}" "${@}" + "${pipeline_path}/test/services/test-wc.sh" "${config[config_file_wc]}" "${@}" } function sc_help() { - printf "%s\n" "[Usage]: test sc [target] [ARGUMENTS]" - printf "%s\n" "List of targets:" - printf "\t%-23s %s\n" "apps" "Apps checks" - printf "\t%-23s %s\n" "opensearch" "Open search checks" - printf "\t%-23s %s\n" "cert-manager" "Cert 
Manager checks" - printf "\t%-23s %s\n" "ingress" "Ingress checks" - printf "%s\n" "[NOTE] If no target is specified, all of them will be executed." - printf "%s\n" "[NOTE] Logging can be enabled for test sc and test sc apps by using the --logging-enabled flag." + printf "%s\n" "[Usage]: test sc [target] [ARGUMENTS]" + printf "%s\n" "List of targets:" + printf "\t%-23s %s\n" "apps" "Apps checks" + printf "\t%-23s %s\n" "opensearch" "Open search checks" + printf "\t%-23s %s\n" "cert-manager" "Cert Manager checks" + printf "\t%-23s %s\n" "ingress" "Ingress checks" + printf "%s\n" "[NOTE] If no target is specified, all of them will be executed." + printf "%s\n" "[NOTE] Logging can be enabled for test sc and test sc apps by using the --logging-enabled flag." } function wc_help() { - printf "%s\n" "[Usage]: test wc [target] [ARGUMENTS]" - printf "%s\n" "List of targets:" - printf "\t%-23s %s\n" "apps" "Apps checks" - printf "\t%-23s %s\n" "cert-manager" "Cert Manager checks" - printf "\t%-23s %s\n" "ingress" "Ingress checks" - printf "\t%-23s %s\n" "hnc" "HNC checks" - printf "%s\n" "[NOTE] If no target is specified, all of them will be executed." - printf "%s\n" "[NOTE] Logging can be enabled for test wc and test wc apps by using the --logging-enabled flag." + printf "%s\n" "[Usage]: test wc [target] [ARGUMENTS]" + printf "%s\n" "List of targets:" + printf "\t%-23s %s\n" "apps" "Apps checks" + printf "\t%-23s %s\n" "cert-manager" "Cert Manager checks" + printf "\t%-23s %s\n" "ingress" "Ingress checks" + printf "\t%-23s %s\n" "hnc" "HNC checks" + printf "%s\n" "[NOTE] If no target is specified, all of them will be executed." + printf "%s\n" "[NOTE] Logging can be enabled for test wc and test wc apps by using the --logging-enabled flag." 
} function sc() { - if [[ ${#} == 0 ]] || [[ ${#} == 1 && ${1} == "--logging-enabled" ]]; then - set +e - test_apps_sc "${@:1}" - set -e - log_info "Testing opensearch\n" - sc_opensearch_checks - echo - log_info "Testing cert-manager\n" - sc_cert_manager_checks - echo - log_info "Testing ingress\n" - sc_ingress_checks - else - case ${1} in - apps) - test_apps_sc "${@:2}" - ;; - opensearch) - sc_opensearch_checks "${@:2}" - ;; - cert-manager) - sc_cert_manager_checks "${@:2}" - ;; - ingress) - sc_ingress_checks "${@:2}" - ;; - --help | -h) - sc_help - ;; - *) - echo "unknown command: $1" - sc_help 1 - exit 1 - ;; - esac - fi - exit 0 + if [[ ${#} == 0 ]] || [[ ${#} == 1 && ${1} == "--logging-enabled" ]]; then + set +e + test_apps_sc "${@:1}" + set -e + log_info "Testing opensearch\n" + sc_opensearch_checks + echo + log_info "Testing cert-manager\n" + sc_cert_manager_checks + echo + log_info "Testing ingress\n" + sc_ingress_checks + else + case ${1} in + apps) + test_apps_sc "${@:2}" + ;; + opensearch) + sc_opensearch_checks "${@:2}" + ;; + cert-manager) + sc_cert_manager_checks "${@:2}" + ;; + ingress) + sc_ingress_checks "${@:2}" + ;; + --help | -h) + sc_help + ;; + *) + echo "unknown command: $1" + sc_help 1 + exit 1 + ;; + esac + fi + exit 0 } function wc() { - if [[ ${#} == 0 ]] || [[ ${#} == 1 && ${1} == "--logging-enabled" ]]; then - set +e - test_apps_wc "${@:1}" - set -e - log_info "Testing cert-manager\n" - wc_cert_manager_checks - echo - log_info "Testing ingress\n" - wc_ingress_checks - echo - log_info "Testing hnc\n" - wc_hnc_checks - - else - case ${1} in - apps) - test_apps_wc "${@:2}" - ;; - cert-manager) - wc_cert_manager_checks "${@:2}" - ;; - ingress) - wc_ingress_checks "${@:2}" - ;; - hnc) - wc_hnc_checks "${@:2}" - ;; - --help | -h) - wc_help - ;; - *) - echo "unknown command: $1" - wc_help 1 - exit 1 - ;; - esac - fi - exit 0 + if [[ ${#} == 0 ]] || [[ ${#} == 1 && ${1} == "--logging-enabled" ]]; then + set +e + test_apps_wc "${@:1}" + set -e + 
log_info "Testing cert-manager\n" + wc_cert_manager_checks + echo + log_info "Testing ingress\n" + wc_ingress_checks + echo + log_info "Testing hnc\n" + wc_hnc_checks + + else + case ${1} in + apps) + test_apps_wc "${@:2}" + ;; + cert-manager) + wc_cert_manager_checks "${@:2}" + ;; + ingress) + wc_ingress_checks "${@:2}" + ;; + hnc) + wc_hnc_checks "${@:2}" + ;; + --help | -h) + wc_help + ;; + *) + echo "unknown command: $1" + wc_help 1 + exit 1 + ;; + esac + fi + exit 0 } function main_help() { - echo list of commands: - printf "%-23s %s\n" "help" "show help menu and commands" - printf "%-23s %s\n" "sc" "Run sc checks" - printf "%-23s %s\n" "wc" "Run wc checks" + echo list of commands: + printf "%-23s %s\n" "help" "show help menu and commands" + printf "%-23s %s\n" "sc" "Run sc checks" + printf "%-23s %s\n" "wc" "Run wc checks" - exit "${1:-0}" + exit "${1:-0}" } function main() { - if [[ ${#} == 0 ]]; then - main_help 0 - fi - - case ${1} in - sc) - config_load "$1" - "$1" "${@:2}" - ;; - wc) - config_load "$1" - "$1" "${@:2}" - ;; - esac + if [[ ${#} == 0 ]]; then + main_help 0 + fi + + case ${1} in + sc) + config_load "$1" + "$1" "${@:2}" + ;; + wc) + config_load "$1" + "$1" "${@:2}" + ;; + esac } main "$@" diff --git a/bin/update-ips.bash b/bin/update-ips.bash index bdb5082408..49907011d2 100755 --- a/bin/update-ips.bash +++ b/bin/update-ips.bash @@ -35,10 +35,10 @@ yq_read() { local value for config_file in "${config["override_${cluster}"]}" \ - "${config["override_common"]}" \ - "${config["default_${cluster}"]}" \ - "${config["default_common"]}" - do + "${config["override_common"]}" \ + "${config["default_${cluster}"]}" \ + "${config["default_common"]}"; do + value=$(yq4 "${config_option}" "${config_file}") if [[ "${value}" != "null" ]]; then @@ -87,8 +87,8 @@ yq_eval() { fi diff -U3 --color=always \ - --label "${config_filename}" <(yq4 -P "${config_file}") \ - --label expected <(yq4 -P "${expression}" "${config_file}") > "${out}" && return + --label 
"${config_filename}" <(yq4 -P "${config_file}") \ + --label expected <(yq4 -P "${expression}" "${config_file}") >"${out}" && return if ${dry_run}; then log_warning "Diff found for ${config_option} in ${config_filename} (diff shows actions needed to be up to date)" @@ -142,7 +142,7 @@ get_kubectl_ips() { mapfile -t ips_wireguard < <("${here}/ops.bash" kubectl "${cluster}" get node "${label_argument}" -o jsonpath='{.items[*].metadata.annotations.projectcalico\.org/IPv4WireguardInterfaceAddr}') local -a ips - read -r -a ips <<< "${ips_internal[*]} ${ips_calico_vxlan[*]} ${ips_calico_ipip[*]} ${ips_wireguard[*]}" + read -r -a ips <<<"${ips_internal[*]} ${ips_calico_vxlan[*]} ${ips_calico_ipip[*]} ${ips_wireguard[*]}" if [ ${#ips[@]} -eq 0 ]; then log_error "No IPs for ${cluster} nodes with label ${label} was found" @@ -229,7 +229,7 @@ get_swift_url() { [[ "${header}" == "x-subject-token:" ]] && os_token="${value}" done swift_url=$(jq -r '.token.catalog[] | select( .type == "object-store" and .name == "swift") | .endpoints[] | select(.interface == "public" and .region == "'"${swift_region}"'") | .url') - } <<< "${response}" + } <<<"${response}" curl -i -s -X DELETE -H "X-Auth-Token: ${os_token}" -H "X-Subject-Token: ${os_token}" "${auth_url}/auth/tokens" >/dev/null @@ -267,7 +267,7 @@ process_ips_to_cidrs() { local -a new_cidrs local -a old_cidrs - readarray -t old_cidrs <<< "$(yq4 "${config_option} | .[]" "${config_file}")" + readarray -t old_cidrs <<<"$(yq4 "${config_option} | .[]" "${config_file}")" for ip in "${@}"; do for cidr in "${old_cidrs[@]}"; do @@ -280,7 +280,7 @@ process_ips_to_cidrs() { new_cidrs+=("${ip}/32") done - yq4 'split(" ") | unique | .[]' <<< "${new_cidrs[@]}" + yq4 'split(" ") | unique | .[]' <<<"${new_cidrs[@]}" } # Parse the host from an URL. 
@@ -295,11 +295,11 @@ parse_url_port() { port="$(echo "${1}" | sed 's/https\?:\/\///' | sed 's/[A-Za-z.0-9-]*:\?//' | sed 's/\/.*//')" [ -n "${port}" ] && echo "${port}" && return case "${1}" in - http://*) echo 80 ;; - https://*) echo 443 ;; - *) - log_error "Could not determine default port for ${2}, missing protocol: ${1}" - exit 1 + http://*) echo 80 ;; + https://*) echo 443 ;; + *) + log_error "Could not determine default port for ${2}, missing protocol: ${1}" + exit 1 ;; esac } @@ -313,10 +313,10 @@ allow_ips() { shift 2 local -a ips - readarray -t ips <<< "$(sort_ips "${@}")" + readarray -t ips <<<"$(sort_ips "${@}")" local -a cidrs - readarray -t cidrs <<< "$(process_ips_to_cidrs "${config_file}" "${config_option}" "${ips[@]}")" + readarray -t cidrs <<<"$(process_ips_to_cidrs "${config_file}" "${config_option}" "${ips[@]}")" local list list=$(echo "[$(for v in "${cidrs[@]}"; do echo "${v},"; done)]" | yq4 -oj) @@ -348,7 +348,7 @@ allow_domain() { local -a ips dns_ips dns_ips=$(get_dns_ips "${dns_record}") - readarray -t ips <<< "$(echo "${dns_ips}" | tr ' ' '\n')" + readarray -t ips <<<"$(echo "${dns_ips}" | tr ' ' '\n')" allow_ips "${config_file}" "${config_option}" "${ips[@]}" } @@ -357,7 +357,7 @@ allow_domain() { # # Usage: is_ip_address is_ip_address() { - python3 -c "import ipaddress; import sys; ipaddress.ip_address(sys.argv[1])" "${1}" > /dev/null 2>&1 + python3 -c "import ipaddress; import sys; ipaddress.ip_address(sys.argv[1])" "${1}" >/dev/null 2>&1 } # Updates the configuration to allow the host domain or IP address. 
@@ -377,7 +377,7 @@ allow_host() { fi pod_subnet="$("${here}/ops.bash" kubectl "${cluster}" get configmap --namespace kube-system kubeadm-config --ignore-not-found --output yaml)" - pod_subnet="$(yq4 '.data.ClusterConfiguration | @yamld | .networking.podSubnet // "0.0.0.0/0"' <<< "${pod_subnet}")" + pod_subnet="$(yq4 '.data.ClusterConfiguration | @yamld | .networking.podSubnet // "0.0.0.0/0"' <<<"${pod_subnet}")" log_warning "Found cluster local endpoint ${host} for ${config_option} using ${pod_subnet}" @@ -404,7 +404,7 @@ allow_nodes() { local -a ips kubectl_ips kubectl_ips=$(get_kubectl_ips "${cluster}" "${label}") - readarray -t ips <<< "$(echo "${kubectl_ips}" | tr ' ' '\n')" + readarray -t ips <<<"$(echo "${kubectl_ips}" | tr ' ' '\n')" allow_ips "${config_file}" "${config_option}" "${ips[@]}" } @@ -575,7 +575,7 @@ validate_config() { if [ "${sync_default_buckets}" == "true" ]; then if [ "${harbor_persistence_type}" == "swift" ] || [ "${thanos_object_storage_type}" == "swift" ]; then - destination_swift=true + destination_swift=true fi fi for bucket_type in ${bucket_destination_types}; do diff --git a/bin/upgrade.bash b/bin/upgrade.bash index 1264ec2961..01623b7b6f 100755 --- a/bin/upgrade.bash +++ b/bin/upgrade.bash @@ -111,10 +111,10 @@ apply() { if "${snippet}" rollback; then log_warn "apply snippet rollback success" - exit $(( return + 2 )) # 3 on rollback failure/success 4 on rollback success/success + exit $((return + 2)) # 3 on rollback failure/success 4 on rollback success/success else log_error "apply snippet rollback failure" - exit $(( return )) # 1 on rollback failure/failure 2 on rollback success/failure + exit $((return)) # 1 on rollback failure/failure 2 on rollback success/failure fi fi done diff --git a/helmfile.d/charts/gatekeeper/templates/files/update.sh b/helmfile.d/charts/gatekeeper/templates/files/update.sh index 0fbb6d0135..630f254d10 100755 --- a/helmfile.d/charts/gatekeeper/templates/files/update.sh +++ 
b/helmfile.d/charts/gatekeeper/templates/files/update.sh @@ -34,7 +34,7 @@ done echo "updating waitFor in values.yaml" -readarray -t templates <<< "$(find "${CHART}/templates/" -type f -name "*.yaml" -not -path "*/wait/*" -not -name "config.yaml")" +readarray -t templates <<<"$(find "${CHART}/templates/" -type f -name "*.yaml" -not -path "*/wait/*" -not -name "config.yaml")" list="$(for template in "${templates[@]}"; do grep "name:" "${template}"; done | sed "s/name: /- /" | sort | yq4 -oj)" diff --git a/helmfile.d/charts/harbor/harbor-backup/scripts/harbor-backup.sh b/helmfile.d/charts/harbor/harbor-backup/scripts/harbor-backup.sh index 2d17293ca4..4040899a79 100644 --- a/helmfile.d/charts/harbor/harbor-backup/scripts/harbor-backup.sh +++ b/helmfile.d/charts/harbor/harbor-backup/scripts/harbor-backup.sh @@ -4,57 +4,57 @@ set -e -o pipefail backup_dir="${BACKUP_DIR:-/backup}" dump_dir="${backup_dir}/dbdump" tarball_dir="${backup_dir}/tarball" -create_dir(){ - echo "creating backup directories" >&2 - mkdir -p "${dump_dir}" - mkdir -p "${tarball_dir}" +create_dir() { + echo "creating backup directories" >&2 + mkdir -p "${dump_dir}" + mkdir -p "${tarball_dir}" } wait_for_db_ready() { - echo "checking connection to ${PG_HOSTNAME}:5432" >&2 - TIMEOUT=12 - while [ $TIMEOUT -gt 0 ]; do - if pg_isready -h "$PG_HOSTNAME" | grep "accepting connections"; then - break - fi - TIMEOUT=$((TIMEOUT - 1)) - sleep 5 - done - if [ $TIMEOUT -eq 0 ]; then - echo "Harbor DB cannot reach within one minute." - exit 1 + echo "checking connection to ${PG_HOSTNAME}:5432" >&2 + TIMEOUT=12 + while [ $TIMEOUT -gt 0 ]; do + if pg_isready -h "$PG_HOSTNAME" | grep "accepting connections"; then + break fi + TIMEOUT=$((TIMEOUT - 1)) + sleep 5 + done + if [ $TIMEOUT -eq 0 ]; then + echo "Harbor DB cannot reach within one minute." 
+ exit 1 + fi } dump_database() { - echo "Dumping database" >&2 - pg_dump -U postgres -h "$PG_HOSTNAME" registry | gzip -c > "${dump_dir}/registry.back.gz" - pg_dump -U postgres -h "$PG_HOSTNAME" postgres | gzip -c > "${dump_dir}/postgres.back.gz" + echo "Dumping database" >&2 + pg_dump -U postgres -h "$PG_HOSTNAME" registry | gzip -c >"${dump_dir}/registry.back.gz" + pg_dump -U postgres -h "$PG_HOSTNAME" postgres | gzip -c >"${dump_dir}/postgres.back.gz" } create_tarball() { - echo "Creating tarball" >&2 - tar zcvf "${tarball_dir}/harbor.tgz" "${dump_dir}" + echo "Creating tarball" >&2 + tar zcvf "${tarball_dir}/harbor.tgz" "${dump_dir}" } s3_upload() { - : "${BUCKET_NAME:?Missing BUCKET_NAME}" - : "${S3_REGION_ENDPOINT:?Missing S3_REGION_ENDPOINT}" - PATH_TO_BACKUP=s3://${BUCKET_NAME}"/backups/"$(date +%s).tgz + : "${BUCKET_NAME:?Missing BUCKET_NAME}" + : "${S3_REGION_ENDPOINT:?Missing S3_REGION_ENDPOINT}" + PATH_TO_BACKUP=s3://${BUCKET_NAME}"/backups/"$(date +%s).tgz - echo "Uploading to s3 bucket ${PATH_TO_BACKUP}" >&2 + echo "Uploading to s3 bucket ${PATH_TO_BACKUP}" >&2 - aws s3 cp "${tarball_dir}/harbor.tgz" "$PATH_TO_BACKUP" --endpoint-url="$S3_REGION_ENDPOINT" + aws s3 cp "${tarball_dir}/harbor.tgz" "$PATH_TO_BACKUP" --endpoint-url="$S3_REGION_ENDPOINT" } s3_get_records() { - before_date="$1" + before_date="$1" - aws s3api list-objects \ - --bucket "${BUCKET_NAME}" \ - --endpoint-url "${S3_REGION_ENDPOINT}" \ - --prefix "backups/" \ - --query "Contents[?LastModified<='${before_date}'][].{Key: Key}" + aws s3api list-objects \ + --bucket "${BUCKET_NAME}" \ + --endpoint-url "${S3_REGION_ENDPOINT}" \ + --prefix "backups/" \ + --query "Contents[?LastModified<='${before_date}'][].{Key: Key}" } s3_remove_path() { @@ -62,7 +62,7 @@ s3_remove_path() { echo "deleting s3://${BUCKET_NAME}/${path}" aws s3 rm "s3://${BUCKET_NAME}/${path}" \ - --endpoint-url "${S3_REGION_ENDPOINT}" + --endpoint-url "${S3_REGION_ENDPOINT}" } gcs_upload() { @@ -91,12 +91,12 @@ 
gcs_get_records() { # * Select all backups in the backups/ folder # * Select only entries older than given date (epoch time) # * Save name of path to key "Key" - gsutil -o "Credentials:gs_service_key_file=${GCS_KEYFILE}" ls -L "${PATH_TO_BACKUPS}" | \ - yq r - -j | \ + gsutil -o "Credentials:gs_service_key_file=${GCS_KEYFILE}" ls -L "${PATH_TO_BACKUPS}" | + yq r - -j | jq '[to_entries | .[] | ' \ - 'select(( .key | test("^gs:\/\/[^\/]+\/backups\/.+")) and ' \ - '(.value."Update time" | strptime("%a, %d %b %Y %H:%M:%S %Z") | mktime <= '"${before_date}"') ) | ' \ - '{Key: (.key | match("^gs:\/\/[^\/]+\/(.+)").captures[0].string)}]' + 'select(( .key | test("^gs:\/\/[^\/]+\/backups\/.+")) and ' \ + '(.value."Update time" | strptime("%a, %d %b %Y %H:%M:%S %Z") | mktime <= '"${before_date}"') ) | ' \ + '{Key: (.key | match("^gs:\/\/[^\/]+\/(.+)").captures[0].string)}]' } gcs_remove_path() { @@ -162,64 +162,63 @@ azure_remove_path() { --name "${path}" } +remove_old_backups() { + : "${DAYS_TO_RETAIN:?Missing DAYS_TO_RETAIN}" -remove_old_backups () { - : "${DAYS_TO_RETAIN:?Missing DAYS_TO_RETAIN}" + if [[ ${S3_BACKUP} == "true" ]]; then + before_date=$(date --iso-8601=seconds -d "-${DAYS_TO_RETAIN} days") + now=$(date --iso-8601=seconds) - if [[ ${S3_BACKUP} == "true" ]]; then - before_date=$(date --iso-8601=seconds -d "-${DAYS_TO_RETAIN} days") - now=$(date --iso-8601=seconds) + del_records=$(s3_get_records "${before_date}") + all_records=$(s3_get_records "${now}") + elif [[ ${GCS_BACKUP} == "true" ]]; then + before_date=$(date -d "-${DAYS_TO_RETAIN} days" +%s) + now=$(date +%s) - del_records=$(s3_get_records "${before_date}") - all_records=$(s3_get_records "${now}") - elif [[ ${GCS_BACKUP} == "true" ]]; then - before_date=$(date -d "-${DAYS_TO_RETAIN} days" +%s) - now=$(date +%s) + del_records=$(gcs_get_records "${before_date}") + all_records=$(gcs_get_records "${now}") + elif [[ ${AZURE_BACKUP} == "true" ]]; then + before_date=$(date -d "-${DAYS_TO_RETAIN} days" +%s) + 
now=$(date +%s) - del_records=$(gcs_get_records "${before_date}") - all_records=$(gcs_get_records "${now}") - elif [[ ${AZURE_BACKUP} == "true" ]]; then - before_date=$(date -d "-${DAYS_TO_RETAIN} days" +%s) - now=$(date +%s) + del_records=$(azure_get_records "${before_date}") + all_records=$(azure_get_records "${now}") + fi - del_records=$(azure_get_records "${before_date}") - all_records=$(azure_get_records "${now}") - fi + del_paths=() + all_paths=() - del_paths=() - all_paths=() + _jq() { + echo "${row}" | base64 --decode | jq -r "${1}" + } - _jq() { - echo "${row}" | base64 --decode | jq -r "${1}" - } + for row in $(echo "${del_records}" | jq -r '.[] | @base64'); do + del_paths+=("$(_jq '.Key')") + done - for row in $(echo "${del_records}" | jq -r '.[] | @base64'); do - del_paths+=("$(_jq '.Key')") - done + for row in $(echo "${all_records}" | jq -r '.[] | @base64'); do + all_paths+=("$(_jq '.Key')") + done - for row in $(echo "${all_records}" | jq -r '.[] | @base64'); do - all_paths+=("$(_jq '.Key')") - done + # Number of backups left if all old backups are removed. + left=$(("${#all_paths[@]}" - "${#del_paths[@]}")) - # Number of backups left if all old backups are removed. - left=$(("${#all_paths[@]}" - "${#del_paths[@]}")) + # We ALWAYS keep N backups even if their TTL has expired! + if (("${left}" < "${DAYS_TO_RETAIN}")); then + num_to_delete=$(("${#all_paths[@]}" - "${DAYS_TO_RETAIN}")) + else + num_to_delete="${#del_paths[@]}" + fi - # We ALWAYS keep N backups even if their TTL has expired! 
- if (( "${left}" < "${DAYS_TO_RETAIN}" )); then - num_to_delete=$(("${#all_paths[@]}" - "${DAYS_TO_RETAIN}")) - else - num_to_delete="${#del_paths[@]}" + for path in "${del_paths[@]::${num_to_delete}}"; do + if [[ ${S3_BACKUP} == "true" ]]; then + s3_remove_path "${path}" + elif [[ ${GCS_BACKUP} == "true" ]]; then + gcs_remove_path "${path}" + elif [[ ${AZURE_BACKUP} == "true" ]]; then + azure_remove_path "${path}" fi - - for path in "${del_paths[@]::${num_to_delete}}"; do - if [[ ${S3_BACKUP} == "true" ]]; then - s3_remove_path "${path}" - elif [[ ${GCS_BACKUP} == "true" ]]; then - gcs_remove_path "${path}" - elif [[ ${AZURE_BACKUP} == "true" ]]; then - azure_remove_path "${path}" - fi - done + done } create_dir diff --git a/helmfile.d/charts/harbor/init-harbor/files/init.sh b/helmfile.d/charts/harbor/init-harbor/files/init.sh index fd6adbce0e..801679cad5 100644 --- a/helmfile.d/charts/harbor/init-harbor/files/init.sh +++ b/helmfile.d/charts/harbor/init-harbor/files/init.sh @@ -2,27 +2,27 @@ set -e validate_harbor() { - echo "testing curl address ${ENDPOINT}" - exists=$(curl -k "${ENDPOINT}"/projects/1 | jq '.code') || { - echo "ERROR L.${LINENO} - Harbor url ${ENDPOINT}/projects/1 cannot be reached." - exit 1 - } - if [ -z "$exists" ]; then - echo "ERROR - Harbor url ${ENDPOINT}/projects/1 did not return any code (probably cannot be reached)" - exit 1 - fi - echo "${exists}" + echo "testing curl address ${ENDPOINT}" + exists=$(curl -k "${ENDPOINT}"/projects/1 | jq '.code') || { + echo "ERROR L.${LINENO} - Harbor url ${ENDPOINT}/projects/1 cannot be reached." + exit 1 + } + if [ -z "$exists" ]; then + echo "ERROR - Harbor url ${ENDPOINT}/projects/1 did not return any code (probably cannot be reached)" + exit 1 + fi + echo "${exists}" } delete_library_project() { - echo Removing project library from harbor - # Curl will return status 500 even though it successfully removed the project. 
- curl -k -X DELETE -u admin:"${HARBOR_PASSWORD}" "${ENDPOINT}"/projects/1 >/dev/null + echo Removing project library from harbor + # Curl will return status 500 even though it successfully removed the project. + curl -k -X DELETE -u admin:"${HARBOR_PASSWORD}" "${ENDPOINT}"/projects/1 >/dev/null } create_new_private_default_project() { - echo "Creating new private project default" - curl -k -X POST -u admin:"${HARBOR_PASSWORD}" "${ENDPOINT}"/projects --header 'Content-Type: application/json' --header 'Accept: application/json' --data '{ + echo "Creating new private project default" + curl -k -X POST -u admin:"${HARBOR_PASSWORD}" "${ENDPOINT}"/projects --header 'Content-Type: application/json' --header 'Accept: application/json' --data '{ "project_name": "default", "metadata": { "public": "0", @@ -32,33 +32,33 @@ create_new_private_default_project() { "auto_scan": "true" } }' - echo "Private default project created" + echo "Private default project created" } init_harbor_state() { - exists=$(validate_harbor) + exists=$(validate_harbor) - echo "Setting up initial harbor state" - if [ "$exists" != "404" ]; then - name=$(curl -k -X GET "${ENDPOINT}"/projects/1 | jq '.name') + echo "Setting up initial harbor state" + if [ "$exists" != "404" ]; then + name=$(curl -k -X GET "${ENDPOINT}"/projects/1 | jq '.name') - if [ "$name" = "\"library\"" ]; then - delete_library_project - create_new_private_default_project - fi - else - echo "Harbor already created default project" + if [ "$name" = "\"library\"" ]; then + delete_library_project + create_new_private_default_project fi + else + echo "Harbor already created default project" + fi } configure_OIDC() { - echo "Configuring oidc support" - err=$(curl -k -X PUT "${ENDPOINT}/configurations" \ - -u admin:"${HARBOR_PASSWORD}" \ - -H "accept: application/json" \ - -H "Content-Type: application/json" \ - -d "{ \"primary_auth_mode\": true, + echo "Configuring oidc support" + err=$(curl -k -X PUT "${ENDPOINT}/configurations" \ + -u 
admin:"${HARBOR_PASSWORD}" \ + -H "accept: application/json" \ + -H "Content-Type: application/json" \ + -d "{ \"primary_auth_mode\": true, \"oidc_verify_cert\": ${OIDC_VERIFY_CERT}, \"auth_mode\": \"oidc_auth\", \"self_registration\": false, @@ -69,53 +69,53 @@ configure_OIDC() { \"oidc_client_secret\": \"${OIDC_CLIENT_SECRET}\", \"oidc_admin_group\": \"${OIDC_ADMIN_GROUP_NAME}\", \"oidc_groups_claim\": \"${OIDC_GROUP_CLAIM_NAME}\"}") - if [ -n "$err" ]; then - echo "ERROR when configuring oidc: $err" - exit 1 - fi + if [ -n "$err" ]; then + echo "ERROR when configuring oidc: $err" + exit 1 + fi } configure_GC() { - echo "Configuring GC" + echo "Configuring GC" - if [ "${GC_FORCE_CONFIGURE}" = "false" ]; then - res=$(curl -k -X GET -w "%{http_code}" "${ENDPOINT}/system/gc/schedule" \ - -u admin:"${HARBOR_PASSWORD}") + if [ "${GC_FORCE_CONFIGURE}" = "false" ]; then + res=$(curl -k -X GET -w "%{http_code}" "${ENDPOINT}/system/gc/schedule" \ + -u admin:"${HARBOR_PASSWORD}") - # shellcheck disable=SC3057 - http_code="${res:${#res}-3}" + # shellcheck disable=SC3057 + http_code="${res:${#res}-3}" - if [ "${http_code}" != "200" ]; then - echo "Failed to check if GC is configured: ${res}" - exit 1 - fi + if [ "${http_code}" != "200" ]; then + echo "Failed to check if GC is configured: ${res}" + exit 1 + fi - if [ ${#res} -ne 3 ]; then - echo "GC already configured" - return - fi + if [ ${#res} -ne 3 ]; then + echo "GC already configured" + return fi + fi - err=$(curl -k -X PUT "${ENDPOINT}/system/gc/schedule" \ - -u admin:"${HARBOR_PASSWORD}" \ - -H "accept: application/json" \ - -H "Content-Type: application/json" \ - -d "{ \"parameters\": {}, + err=$(curl -k -X PUT "${ENDPOINT}/system/gc/schedule" \ + -u admin:"${HARBOR_PASSWORD}" \ + -H "accept: application/json" \ + -H "Content-Type: application/json" \ + -d "{ \"parameters\": {}, \"schedule\": { \"cron\": \"${GC_SCHEDULE}\", \"type\": \"Custom\" } }") - if [ -n "$err" ]; then - echo "ERROR when configuring GC: $err" 
- exit 1 - fi + if [ -n "$err" ]; then + echo "ERROR when configuring GC: $err" + exit 1 + fi } init_harbor_state configure_OIDC if [ "${GC_ENABLED}" = "true" ]; then - configure_GC + configure_GC fi echo "Harbor initialized" diff --git a/helmfile.d/charts/opensearch/backup/scripts/backup.sh b/helmfile.d/charts/opensearch/backup/scripts/backup.sh index 568207c9bd..57098981ca 100644 --- a/helmfile.d/charts/opensearch/backup/scripts/backup.sh +++ b/helmfile.d/charts/opensearch/backup/scripts/backup.sh @@ -9,10 +9,10 @@ set -euo pipefail : "${INDICES:?Missing INDICES}" curl --insecure -s -i -u "${OPENSEARCH_USERNAME}:${OPENSEARCH_PASSWORD}" \ - -XPUT "https://${OPENSEARCH_ENDPOINT}/_snapshot/${SNAPSHOT_REPOSITORY}/snapshot-$(date --utc +%Y%m%d_%H%M%Sz)" \ - -H "Content-Type: application/json" -d' + -XPUT "https://${OPENSEARCH_ENDPOINT}/_snapshot/${SNAPSHOT_REPOSITORY}/snapshot-$(date --utc +%Y%m%d_%H%M%Sz)" \ + -H "Content-Type: application/json" -d' { "indices": "'"${INDICES}"'", "include_global_state": false - }' \ - | tee /dev/stderr | grep "200 OK" + }' | + tee /dev/stderr | grep "200 OK" diff --git a/helmfile.d/charts/opensearch/slm/scripts/slm-retention.bash b/helmfile.d/charts/opensearch/slm/scripts/slm-retention.bash index ebd13c96f1..b1850832bb 100644 --- a/helmfile.d/charts/opensearch/slm/scripts/slm-retention.bash +++ b/helmfile.d/charts/opensearch/slm/scripts/slm-retention.bash @@ -31,113 +31,113 @@ OPENSEARCH_URL="https://${OPENSEARCH_ENDPOINT}" # Snapshots returned from this function should be succeeded, or depending on how it was created, partial. 
# https://opensearch.org/docs/latest/opensearch/snapshot-restore function get_snapshots { - local url="${OPENSEARCH_URL}/_cat/snapshots/${SNAPSHOT_REPOSITORY}" - curl --insecure "${url}" -f -X GET --max-time "${REQUEST_TIMEOUT_SECONDS}" --no-progress-meter \ - --basic --user "${OPENSEARCH_USERNAME}:${OPENSEARCH_PASSWORD}" + local url="${OPENSEARCH_URL}/_cat/snapshots/${SNAPSHOT_REPOSITORY}" + curl --insecure "${url}" -f -X GET --max-time "${REQUEST_TIMEOUT_SECONDS}" --no-progress-meter \ + --basic --user "${OPENSEARCH_USERNAME}:${OPENSEARCH_PASSWORD}" } function get_snapshot_age { - local snapshots=$1 - local idx=$2 - local snapshot_start_date_seconds - local now_seconds - local age_seconds - - snapshot_start_date_seconds=$(sed "${idx}q;d" <(echo "${snapshots}") | awk '{ print $3 }') - now_seconds=$(date +%s) - age_seconds=$((now_seconds - snapshot_start_date_seconds)) - echo "${age_seconds}" + local snapshots=$1 + local idx=$2 + local snapshot_start_date_seconds + local now_seconds + local age_seconds + + snapshot_start_date_seconds=$(sed "${idx}q;d" <(echo "${snapshots}") | awk '{ print $3 }') + now_seconds=$(date +%s) + age_seconds=$((now_seconds - snapshot_start_date_seconds)) + echo "${age_seconds}" } function remove_snapshots { - local snapshots_to_delete=$1 - local url="${OPENSEARCH_URL}/_snapshot/${SNAPSHOT_REPOSITORY}/${snapshots_to_delete}" - echo "Deleting snapshots: ${snapshots_to_delete}" - curl --insecure "${url}" -f -X DELETE --max-time "${REQUEST_TIMEOUT_SECONDS}" --no-progress-meter \ - --basic --user "${OPENSEARCH_USERNAME}:${OPENSEARCH_PASSWORD}" - echo "" + local snapshots_to_delete=$1 + local url="${OPENSEARCH_URL}/_snapshot/${SNAPSHOT_REPOSITORY}/${snapshots_to_delete}" + echo "Deleting snapshots: ${snapshots_to_delete}" + curl --insecure "${url}" -f -X DELETE --max-time "${REQUEST_TIMEOUT_SECONDS}" --no-progress-meter \ + --basic --user "${OPENSEARCH_USERNAME}:${OPENSEARCH_PASSWORD}" + echo "" } function check_snapshot_count { - local 
snapshot_count=$1 - if [ "${snapshot_count}" -le "${MIN_SNAPSHOTS}" ]; then - echo "Snapshot count: ${snapshot_count} fewer than minimum: ${MIN_SNAPSHOTS}, do nothing" - return 1 - fi + local snapshot_count=$1 + if [ "${snapshot_count}" -le "${MIN_SNAPSHOTS}" ]; then + echo "Snapshot count: ${snapshot_count} fewer than minimum: ${MIN_SNAPSHOTS}, do nothing" + return 1 + fi } function check_old_snapshots { - local snapshots=$1 - if [ "$(get_snapshot_age "${snapshots}" 1)" -le "${MAX_AGE_SECONDS}" ]; then - echo "No old snapshots" - return 1 - fi + local snapshots=$1 + if [ "$(get_snapshot_age "${snapshots}" 1)" -le "${MAX_AGE_SECONDS}" ]; then + echo "No old snapshots" + return 1 + fi } function remove_old_snapshots { - local idx=1 - local snapshots - local snapshot_count - local snapshots_to_delete="" - - echo "Checking for old snapshots." - - snapshots=$(get_snapshots) - if [[ -z ${snapshots} ]]; then - snapshot_count=0 - else - snapshot_count=$(echo "${snapshots}" | wc -l) - fi + local idx=1 + local snapshots + local snapshot_count + local snapshots_to_delete="" + + echo "Checking for old snapshots." 
- check_snapshot_count "${snapshot_count}" || return 0 - check_old_snapshots "${snapshots}" || return 0 - - while [ $((snapshot_count - idx )) -ge "${MIN_SNAPSHOTS}" ]; do - local age_seconds - age_seconds=$(get_snapshot_age "${snapshots}" "${idx}") - if [ "${age_seconds}" -gt "${MAX_AGE_SECONDS}" ]; then - local snapshot_name - snapshot_name=$(sed "${idx}q;d" <(echo "${snapshots}") | awk '{ print $1 }') - echo "Snapshot ${snapshot_name} is ${age_seconds} s old, max ${MAX_AGE_SECONDS} s" - snapshots_to_delete="${snapshots_to_delete}${snapshot_name}," - fi - idx=$((idx + 1)) - done - if [ -n "${snapshots_to_delete}" ]; then - remove_snapshots "${snapshots_to_delete}" + snapshots=$(get_snapshots) + if [[ -z ${snapshots} ]]; then + snapshot_count=0 + else + snapshot_count=$(echo "${snapshots}" | wc -l) + fi + + check_snapshot_count "${snapshot_count}" || return 0 + check_old_snapshots "${snapshots}" || return 0 + + while [ $((snapshot_count - idx)) -ge "${MIN_SNAPSHOTS}" ]; do + local age_seconds + age_seconds=$(get_snapshot_age "${snapshots}" "${idx}") + if [ "${age_seconds}" -gt "${MAX_AGE_SECONDS}" ]; then + local snapshot_name + snapshot_name=$(sed "${idx}q;d" <(echo "${snapshots}") | awk '{ print $1 }') + echo "Snapshot ${snapshot_name} is ${age_seconds} s old, max ${MAX_AGE_SECONDS} s" + snapshots_to_delete="${snapshots_to_delete}${snapshot_name}," fi + idx=$((idx + 1)) + done + if [ -n "${snapshots_to_delete}" ]; then + remove_snapshots "${snapshots_to_delete}" + fi } function remove_excess_snapshots { - local idx=1 - local snapshots - local snapshot_count - local snapshots_to_delete="" - - echo "Checking number of snapshots." 
- - snapshots=$(get_snapshots) - if [[ -z ${snapshots} ]]; then - snapshot_count=0 - else - snapshot_count=$(echo "${snapshots}" | wc -l) - fi - echo "Number of snapshots: $snapshot_count" - - check_snapshot_count "${snapshot_count}" || return 0 - - while [ $((snapshot_count - idx )) -ge "${MAX_SNAPSHOTS}" ]; do - local snapshot_name - snapshot_name=$(sed "${idx}q;d" <(echo "${snapshots}") | awk '{ print $1 }') - echo "Too many snapshots: $snapshot_count, max ${MAX_SNAPSHOTS}" - snapshots_to_delete="${snapshots_to_delete}${snapshot_name}," - idx=$((idx + 1)) - done - if [ -n "${snapshots_to_delete}" ]; then - remove_snapshots "${snapshots_to_delete}" - else - echo "Snapshot count: ${snapshot_count} is not more than maximum: ${MAX_SNAPSHOTS}, do nothing" - fi + local idx=1 + local snapshots + local snapshot_count + local snapshots_to_delete="" + + echo "Checking number of snapshots." + + snapshots=$(get_snapshots) + if [[ -z ${snapshots} ]]; then + snapshot_count=0 + else + snapshot_count=$(echo "${snapshots}" | wc -l) + fi + echo "Number of snapshots: $snapshot_count" + + check_snapshot_count "${snapshot_count}" || return 0 + + while [ $((snapshot_count - idx)) -ge "${MAX_SNAPSHOTS}" ]; do + local snapshot_name + snapshot_name=$(sed "${idx}q;d" <(echo "${snapshots}") | awk '{ print $1 }') + echo "Too many snapshots: $snapshot_count, max ${MAX_SNAPSHOTS}" + snapshots_to_delete="${snapshots_to_delete}${snapshot_name}," + idx=$((idx + 1)) + done + if [ -n "${snapshots_to_delete}" ]; then + remove_snapshots "${snapshots_to_delete}" + else + echo "Snapshot count: ${snapshot_count} is not more than maximum: ${MAX_SNAPSHOTS}, do nothing" + fi } echo "SLM retention procedure started" diff --git a/helmfile.d/hooks/create-from-manifest.sh b/helmfile.d/hooks/create-from-manifest.sh index ddf299193d..7d03d9eb5e 100755 --- a/helmfile.d/hooks/create-from-manifest.sh +++ b/helmfile.d/hooks/create-from-manifest.sh @@ -20,7 +20,7 @@ create_from_manifest() { exit 1 fi - if "${ck8s}" 
ops kubectl "${cluster}" get -f "$1" > /dev/null; then + if "${ck8s}" ops kubectl "${cluster}" get -f "$1" >/dev/null; then echo "note: resources already created from manifest { ${file} }" else echo "note: creating resources from manifest { ${file} }" diff --git a/migration/create-migration-document.sh b/migration/create-migration-document.sh index 7e4a07370a..afd4d7e1b5 100755 --- a/migration/create-migration-document.sh +++ b/migration/create-migration-document.sh @@ -3,13 +3,13 @@ set -euo pipefail usage() { - echo "This script must have apps old and new major versions as arguments." - echo "Usage: $0 [old_version] [new_version]" - echo "Example: ./create-migration-document.sh v0.22 v0.23" + echo "This script must have apps old and new major versions as arguments." + echo "Usage: $0 [old_version] [new_version]" + echo "Example: ./create-migration-document.sh v0.22 v0.23" } -if [ $# -lt 2 ]; then - usage - exit 1 +if [ $# -lt 2 ]; then + usage + exit 1 fi here="$(dirname "$(readlink -f "$0")")" @@ -28,24 +28,24 @@ if [[ ! "${reply}" =~ ^[yY]$ ]]; then fi if [ -d "${folder_name}" ]; then - echo "- ${folder_name} directory exists" + echo "- ${folder_name} directory exists" else - mkdir "${folder_name}" - echo "- ${folder_name} directory created" + mkdir "${folder_name}" + echo "- ${folder_name} directory created" fi if [ -f "${folder_name}/README.md" ]; then - echo -n "- ${folder_name}/README.md exists. Do you want to replace it? (y/N): " - read -r reply - if [[ ${reply} =~ ^[yY]$ ]]; then - # shellcheck disable=SC2016 - envsubst '$new_version$old_version' < "${here}/template/README.md" > "${folder_name}/README.md" - echo "- ${folder_name}/README.md replaced" - fi -else + echo -n "- ${folder_name}/README.md exists. Do you want to replace it? 
(y/N): " + read -r reply + if [[ ${reply} =~ ^[yY]$ ]]; then # shellcheck disable=SC2016 - envsubst '$new_version$old_version' < "${here}/template/README.md" > "${folder_name}/README.md" - echo "- ${folder_name}/README.md created" + envsubst '$new_version$old_version' <"${here}/template/README.md" >"${folder_name}/README.md" + echo "- ${folder_name}/README.md replaced" + fi +else + # shellcheck disable=SC2016 + envsubst '$new_version$old_version' <"${here}/template/README.md" >"${folder_name}/README.md" + echo "- ${folder_name}/README.md created" fi cp -r "${here}/template/apply" "${folder_name}/" diff --git a/migration/template/prepare/50-init.sh b/migration/template/prepare/50-init.sh index 481a9351e0..731961580a 100755 --- a/migration/template/prepare/50-init.sh +++ b/migration/template/prepare/50-init.sh @@ -7,10 +7,10 @@ ROOT="$(readlink -f "${HERE}/../../../")" source "${ROOT}/scripts/migration/lib.sh" case "${CK8S_CLUSTER}" in - both|sc|wc) - "${ROOT}/bin/ck8s" init "${CK8S_CLUSTER}" - ;; - *) - log_fatal "usage: 50-init.sh " - ;; +both | sc | wc) + "${ROOT}/bin/ck8s" init "${CK8S_CLUSTER}" + ;; +*) + log_fatal "usage: 50-init.sh " + ;; esac diff --git a/migration/v0.39/prepare/50-init.sh b/migration/v0.39/prepare/50-init.sh index 481a9351e0..731961580a 100755 --- a/migration/v0.39/prepare/50-init.sh +++ b/migration/v0.39/prepare/50-init.sh @@ -7,10 +7,10 @@ ROOT="$(readlink -f "${HERE}/../../../")" source "${ROOT}/scripts/migration/lib.sh" case "${CK8S_CLUSTER}" in - both|sc|wc) - "${ROOT}/bin/ck8s" init "${CK8S_CLUSTER}" - ;; - *) - log_fatal "usage: 50-init.sh " - ;; +both | sc | wc) + "${ROOT}/bin/ck8s" init "${CK8S_CLUSTER}" + ;; +*) + log_fatal "usage: 50-init.sh " + ;; esac diff --git a/migration/v0.39/prepare/60-adopt-dns-records.sh b/migration/v0.39/prepare/60-adopt-dns-records.sh index 4476c4be92..2b22f5546c 100755 --- a/migration/v0.39/prepare/60-adopt-dns-records.sh +++ b/migration/v0.39/prepare/60-adopt-dns-records.sh @@ -50,29 +50,28 @@ export 
AWS_SECRET_ACCESS_KEY="${aws_secret_key_id}" log_info "Fetching records..." records=$(mktemp --suffix="aws_records.json") -aws route53 list-resource-record-sets --hosted-zone-id "${hostedZoneId}" --output json > "${records}" +aws route53 list-resource-record-sets --hosted-zone-id "${hostedZoneId}" --output json >"${records}" log_info "Filtering..." -baseDomain=$(yq4 ".global.baseDomain" "$CK8S_CONFIG_PATH/common-config.yaml" ) +baseDomain=$(yq4 ".global.baseDomain" "$CK8S_CONFIG_PATH/common-config.yaml") baseDomainRecords=$(jq ".ResourceRecordSets[] | select(.Name | test(\".*.${baseDomain}.$\"))" "${records}") ARecords=$(echo "${baseDomainRecords}" | jq '. | select(.Type == "A")') -readarray -t recordNames < <(echo "${ARecords}" | jq -r '.Name' ) +readarray -t recordNames < <(echo "${ARecords}" | jq -r '.Name') log_info "Fetching txtOwnerId..." -ownerId=$(yq4 ".externalDns.txtOwnerId" <(yq_merge "$CK8S_CONFIG_PATH/common-config.yaml" "$CK8S_CONFIG_PATH/sc-config.yaml" "$CK8S_CONFIG_PATH/wc-config.yaml") ) +ownerId=$(yq4 ".externalDns.txtOwnerId" <(yq_merge "$CK8S_CONFIG_PATH/common-config.yaml" "$CK8S_CONFIG_PATH/sc-config.yaml" "$CK8S_CONFIG_PATH/wc-config.yaml")) if [[ "${ownerId}" == "null" ]]; then log_fatal "Missing txtOwnerId!" fi log_info "Fetching txtPrefix..." -txtPrefix=$(yq4 ".externalDns.txtPrefix" <(yq_merge "$CK8S_CONFIG_PATH/common-config.yaml" "$CK8S_CONFIG_PATH/sc-config.yaml" "$CK8S_CONFIG_PATH/wc-config.yaml") ) +txtPrefix=$(yq4 ".externalDns.txtPrefix" <(yq_merge "$CK8S_CONFIG_PATH/common-config.yaml" "$CK8S_CONFIG_PATH/sc-config.yaml" "$CK8S_CONFIG_PATH/wc-config.yaml")) if [[ "${txtPrefix}" == "null" ]]; then - txtPrefix=""; + txtPrefix="" fi recordFile=$(mktemp --suffix="record-file.json") -for record in "${recordNames[@]}" -do +for record in "${recordNames[@]}"; do record="${record/\\052/"*"}" log_info "Creating owner record for ${record}..." 
echo "{ @@ -92,6 +91,6 @@ do } } ] - }" > "${recordFile}" + }" >"${recordFile}" aws route53 change-resource-record-sets --no-cli-pager --hosted-zone-id "${hostedZoneId}" --change-batch "file://${recordFile}" done diff --git a/migration/v0.40/prepare/50-init.sh b/migration/v0.40/prepare/50-init.sh index 481a9351e0..731961580a 100755 --- a/migration/v0.40/prepare/50-init.sh +++ b/migration/v0.40/prepare/50-init.sh @@ -7,10 +7,10 @@ ROOT="$(readlink -f "${HERE}/../../../")" source "${ROOT}/scripts/migration/lib.sh" case "${CK8S_CLUSTER}" in - both|sc|wc) - "${ROOT}/bin/ck8s" init "${CK8S_CLUSTER}" - ;; - *) - log_fatal "usage: 50-init.sh " - ;; +both | sc | wc) + "${ROOT}/bin/ck8s" init "${CK8S_CLUSTER}" + ;; +*) + log_fatal "usage: 50-init.sh " + ;; esac diff --git a/migration/v0.41/prepare/50-init.sh b/migration/v0.41/prepare/50-init.sh index 481a9351e0..731961580a 100755 --- a/migration/v0.41/prepare/50-init.sh +++ b/migration/v0.41/prepare/50-init.sh @@ -7,10 +7,10 @@ ROOT="$(readlink -f "${HERE}/../../../")" source "${ROOT}/scripts/migration/lib.sh" case "${CK8S_CLUSTER}" in - both|sc|wc) - "${ROOT}/bin/ck8s" init "${CK8S_CLUSTER}" - ;; - *) - log_fatal "usage: 50-init.sh " - ;; +both | sc | wc) + "${ROOT}/bin/ck8s" init "${CK8S_CLUSTER}" + ;; +*) + log_fatal "usage: 50-init.sh " + ;; esac diff --git a/migration/v0.42/apply/10-upgrade-grafana.sh b/migration/v0.42/apply/10-upgrade-grafana.sh index e539c5d0bc..b50af8515d 100755 --- a/migration/v0.42/apply/10-upgrade-grafana.sh +++ b/migration/v0.42/apply/10-upgrade-grafana.sh @@ -28,8 +28,8 @@ run() { clusters_monitoring=$(yq4 '[.global.clustersMonitoring[] | {"name": .}]' "${CK8S_CONFIG_PATH}/sc-config.yaml" -ojson) export clusters_monitoring - user_grafana_cm=$(kubectl_do sc get cm user-grafana -n monitoring -o=jsonpath='{.data.datasources\.yaml}' \ - | yq4 '.deleteDatasources = env(clusters_monitoring)' -o json) + user_grafana_cm=$(kubectl_do sc get cm user-grafana -n monitoring 
-o=jsonpath='{.data.datasources\.yaml}' | + yq4 '.deleteDatasources = env(clusters_monitoring)' -o json) PATCH="[ { diff --git a/migration/v0.42/prepare/50-init.sh b/migration/v0.42/prepare/50-init.sh index 481a9351e0..731961580a 100755 --- a/migration/v0.42/prepare/50-init.sh +++ b/migration/v0.42/prepare/50-init.sh @@ -7,10 +7,10 @@ ROOT="$(readlink -f "${HERE}/../../../")" source "${ROOT}/scripts/migration/lib.sh" case "${CK8S_CLUSTER}" in - both|sc|wc) - "${ROOT}/bin/ck8s" init "${CK8S_CLUSTER}" - ;; - *) - log_fatal "usage: 50-init.sh " - ;; +both | sc | wc) + "${ROOT}/bin/ck8s" init "${CK8S_CLUSTER}" + ;; +*) + log_fatal "usage: 50-init.sh " + ;; esac diff --git a/migration/v0.43/prepare/50-init.sh b/migration/v0.43/prepare/50-init.sh index 481a9351e0..731961580a 100755 --- a/migration/v0.43/prepare/50-init.sh +++ b/migration/v0.43/prepare/50-init.sh @@ -7,10 +7,10 @@ ROOT="$(readlink -f "${HERE}/../../../")" source "${ROOT}/scripts/migration/lib.sh" case "${CK8S_CLUSTER}" in - both|sc|wc) - "${ROOT}/bin/ck8s" init "${CK8S_CLUSTER}" - ;; - *) - log_fatal "usage: 50-init.sh " - ;; +both | sc | wc) + "${ROOT}/bin/ck8s" init "${CK8S_CLUSTER}" + ;; +*) + log_fatal "usage: 50-init.sh " + ;; esac diff --git a/pipeline/cleanup-docker-image.bash b/pipeline/cleanup-docker-image.bash index 5cb6f00c2a..657c1315ad 100755 --- a/pipeline/cleanup-docker-image.bash +++ b/pipeline/cleanup-docker-image.bash @@ -4,45 +4,45 @@ set -eu -o pipefail if [ -z ${GITHUB_SHA+x} ]; then - echo "GITHUB_SHA not set, skipping Docker tag deletion." >&2 + echo "GITHUB_SHA not set, skipping Docker tag deletion." >&2 else - : "${GITHUB_ACTOR:?Missing GITHUB_ACTOR}" - : "${GITHUB_TOKEN:?Missing GITHUB_TOKEN}" + : "${GITHUB_ACTOR:?Missing GITHUB_ACTOR}" + : "${GITHUB_TOKEN:?Missing GITHUB_TOKEN}" - # At the moment you need to know the version id to delete a container version. - # To fetch it you need to list all versions and fetch the ID of the one with the correct tag. 
- # One entry in the list of versions looks something like this: - # { - # "id": 123456, - # ... - # "metadata": { - # "container": { - # "tags": [ - # "${GITHUB_SHA}" - # ] - # } - # } - # } - # So to fetch the id we need to match to any entry that has the commit hash and fetch that id - # The jq filter here is not foolproof and can return multiple versions if there's more than one version ID with the same tag. - VERSION_ID=$(curl -s \ - -H "Accept: application/vnd.github+json" \ - -H "Authorization: Bearer ${GITHUB_TOKEN}" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - https://api.github.com/orgs/elastisys/packages/container/compliantkubernetes-apps-pipeline/versions | \ - jq '.[] | select(.metadata.container.tags | any(. == "'"${GITHUB_SHA}"'")).id') + # At the moment you need to know the version id to delete a container version. + # To fetch it you need to list all versions and fetch the ID of the one with the correct tag. + # One entry in the list of versions looks something like this: + # { + # "id": 123456, + # ... + # "metadata": { + # "container": { + # "tags": [ + # "${GITHUB_SHA}" + # ] + # } + # } + # } + # So to fetch the id we need to match to any entry that has the commit hash and fetch that id + # The jq filter here is not foolproof and can return multiple versions if there's more than one version ID with the same tag. + VERSION_ID=$(curl -s \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + https://api.github.com/orgs/elastisys/packages/container/compliantkubernetes-apps-pipeline/versions | + jq '.[] | select(.metadata.container.tags | any(. == "'"${GITHUB_SHA}"'")).id') - echo "Deleting Github package tag: ${GITHUB_SHA} (Version ID: ${VERSION_ID})" >&2 + echo "Deleting Github package tag: ${GITHUB_SHA} (Version ID: ${VERSION_ID})" >&2 - if [[ ! "${VERSION_ID}" =~ ^[0-9]+$ ]]; then - echo "Version ID not correctly formatted" >&2 - exit 1 - fi + if [[ ! 
"${VERSION_ID}" =~ ^[0-9]+$ ]]; then + echo "Version ID not correctly formatted" >&2 + exit 1 + fi - curl -s \ - -X DELETE \ - -H "Accept: application/vnd.github+json" \ - -H "Authorization: Bearer ${GITHUB_TOKEN}"\ - -H "X-GitHub-Api-Version: 2022-11-28" \ - "https://api.github.com/orgs/elastisys/packages/container/compliantkubernetes-apps-pipeline/versions/${VERSION_ID}" + curl -s \ + -X DELETE \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + "https://api.github.com/orgs/elastisys/packages/container/compliantkubernetes-apps-pipeline/versions/${VERSION_ID}" fi diff --git a/pipeline/common.bash b/pipeline/common.bash index 3e42e9bee7..7c3ac6709b 100644 --- a/pipeline/common.bash +++ b/pipeline/common.bash @@ -12,14 +12,14 @@ export CK8S_CONFIG_PATH export XDG_DATA_HOME="/root/.config" config_update() { - yq4 -i "${2} = \"${3}\"" "${CK8S_CONFIG_PATH}/${1}-config.yaml" + yq4 -i "${2} = \"${3}\"" "${CK8S_CONFIG_PATH}/${1}-config.yaml" } secrets_update() { - local secrets_yaml="${CK8S_CONFIG_PATH}/secrets.yaml" - # TODO: install editor in pipeline and set TERM properly to write using - # `sops --set` instead. - sops --config "${CK8S_CONFIG_PATH}/.sops.yaml" -d -i "${secrets_yaml}" - yq4 -i "${1} = \"${2}\"" "${secrets_yaml}" - sops --config "${CK8S_CONFIG_PATH}/.sops.yaml" -e -i "${secrets_yaml}" + local secrets_yaml="${CK8S_CONFIG_PATH}/secrets.yaml" + # TODO: install editor in pipeline and set TERM properly to write using + # `sops --set` instead. 
+ sops --config "${CK8S_CONFIG_PATH}/.sops.yaml" -d -i "${secrets_yaml}" + yq4 -i "${1} = \"${2}\"" "${secrets_yaml}" + sops --config "${CK8S_CONFIG_PATH}/.sops.yaml" -e -i "${secrets_yaml}" } diff --git a/pipeline/opensearch.bash b/pipeline/opensearch.bash index 378c9005d7..bcc19b25dd 100755 --- a/pipeline/opensearch.bash +++ b/pipeline/opensearch.bash @@ -5,11 +5,12 @@ set -eu opensearch_url=https://opensearch.ops.pipeline-exoscale.elastisys.se/api/status retries=60 while [ ${retries} -gt 0 ]; do - result="$(curl --connect-timeout 20 --max-time 60 -ksIL -o /dev/null -w "%{http_code}" $opensearch_url || true)" - [[ "${result}" == "401" ]] && echo "Opensearch is ready. Got status ${result}"; break - echo "Waiting for OpenSearch to be ready. Got status ${result}" - sleep 10 - retries=$((retries-1)) + result="$(curl --connect-timeout 20 --max-time 60 -ksIL -o /dev/null -w "%{http_code}" $opensearch_url || true)" + [[ "${result}" == "401" ]] && echo "Opensearch is ready. Got status ${result}" + break + echo "Waiting for OpenSearch to be ready. 
Got status ${result}" + sleep 10 + retries=$((retries - 1)) done ./apps/bin/ck8s ops kubectl wc -n fluentd rollout restart daemonset fluentd-fluentd-elasticsearch ./apps/bin/ck8s ops kubectl wc -n kube-system rollout restart daemonset fluentd-system-fluentd-elasticsearch diff --git a/pipeline/setup-pgp.bash b/pipeline/setup-pgp.bash index cfd5250af8..97662ebfff 100755 --- a/pipeline/setup-pgp.bash +++ b/pipeline/setup-pgp.bash @@ -5,15 +5,15 @@ set -eu -o pipefail : "${PGP_KEY:?Missing PGP_KEY}" : "${PGP_PASSPHRASE:?Missing PGP_PASSPHRASE}" -echo "${PGP_PASSPHRASE}" | \ - gpg --pinentry-mode loopback --passphrase-fd 0 --import \ +echo "${PGP_PASSPHRASE}" | + gpg --pinentry-mode loopback --passphrase-fd 0 --import \ <(echo "${PGP_KEY}") -echo allow-preset-passphrase > ~/.gnupg/gpg-agent.conf +echo allow-preset-passphrase >~/.gnupg/gpg-agent.conf gpg-connect-agent reloadagent /bye keys=$(gpg --list-keys --with-colons --with-keygrip) keygrip=$(echo "${keys}" | awk -F: '$1 == "grp" {print $10;}' | tail -n1) -echo "${PGP_PASSPHRASE}" | \ - /usr/lib/gnupg2/gpg-preset-passphrase --preset "${keygrip}" +echo "${PGP_PASSPHRASE}" | + /usr/lib/gnupg2/gpg-preset-passphrase --preset "${keygrip}" diff --git a/pipeline/test/services/common/testPersistentVolumeClaims.sh b/pipeline/test/services/common/testPersistentVolumeClaims.sh index 2b7a10d9d2..56321e515b 100755 --- a/pipeline/test/services/common/testPersistentVolumeClaims.sh +++ b/pipeline/test/services/common/testPersistentVolumeClaims.sh @@ -17,10 +17,10 @@ testPersistentVolumeClaims() { output="$namespace/$name: $phase" if [ "$phase" = Bound ]; then echo "$output ✔" - SUCCESSES=$((SUCCESSES+1)) + SUCCESSES=$((SUCCESSES + 1)) else echo "$output ❌" - FAILURES=$((FAILURES+1)) + FAILURES=$((FAILURES + 1)) DEBUG_OUTPUT+=$pvc fi done < <(echo "$pvcs" | jq -c '.items[]') diff --git a/pipeline/test/services/funcs.sh b/pipeline/test/services/funcs.sh index e172a8974b..98053318ca 100644 --- a/pipeline/test/services/funcs.sh +++ 
b/pipeline/test/services/funcs.sh @@ -1,13 +1,12 @@ #!/usr/bin/env bash LOGGING=${LOGGING:-""} PIPELINE=${PIPELINE:-false} -if [ -z "$PIPELINE" ] -then - RETRY_COUNT=6 - RETRY_WAIT=10 +if [ -z "$PIPELINE" ]; then + RETRY_COUNT=6 + RETRY_WAIT=10 else - RETRY_COUNT=24 - RETRY_WAIT=10 + RETRY_COUNT=24 + RETRY_WAIT=10 fi # Args: @@ -15,28 +14,29 @@ fi # 2. namespace # 3. name of resource function testResourceExistence { - if kubectl get "$1" -n "$2" "$3" &> /dev/null - then - echo -n -e "\texists ✔"; SUCCESSES=$((SUCCESSES+1)) - return 0 - else - echo -n -e "\tmissing ❌"; FAILURES=$((FAILURES+1)) - return 1 - fi + if kubectl get "$1" -n "$2" "$3" &>/dev/null; then + echo -n -e "\texists ✔" + SUCCESSES=$((SUCCESSES + 1)) + return 0 + else + echo -n -e "\tmissing ❌" + FAILURES=$((FAILURES + 1)) + return 1 + fi } # Makes dataset smaller for optimization # Args: # 1. kind function getStatus() { - kind="${1}" - jsonData=$(kubectl get "${kind}" --all-namespaces -o json) - lessData=$(echo "${jsonData}" | - jq '.items[] | + kind="${1}" + jsonData=$(kubectl get "${kind}" --all-namespaces -o json) + lessData=$(echo "${jsonData}" | + jq '.items[] | {kind: .kind , name: .metadata.name , namespace: .metadata.namespace , status: .status.readyReplicas , replicas: .status.replicas , numberReady: .status.numberReady , desiredNumberScheduled: .status.desiredNumberScheduled}') - echo "${lessData}" + echo "${lessData}" } # Args: @@ -44,23 +44,25 @@ function getStatus() { # 2. namespace # 3. name of resource function testResourceExistenceFast { - kind="${1}" - namespace="${2}" - currentResource="${3}" - simpleData="${4}" - activeResourceStatus=$(echo "${simpleData}" | - jq -r --arg name "${currentResource}" --arg namespace "${namespace}" --arg kind "${kind}" '. | + kind="${1}" + namespace="${2}" + currentResource="${3}" + simpleData="${4}" + activeResourceStatus=$(echo "${simpleData}" | + jq -r --arg name "${currentResource}" --arg namespace "${namespace}" --arg kind "${kind}" '. 
| select(.name==$name and .namespace==$namespace and .kind==$kind) | .status') - echo -n "${currentResource}" - if [[ -z "${activeResourceStatus}" ]]; then - echo -n -e "\texists ❌"; FAILURES=$((FAILURES+1)) - echo -e "\tready ❌"; FAILURES=$((FAILURES+1)) - else - echo -n -e "\texists ✔" - resourceReplicaCompare "${kind}" "${namespace}" "${currentResource}" "${simpleData}" - fi + echo -n "${currentResource}" + if [[ -z "${activeResourceStatus}" ]]; then + echo -n -e "\texists ❌" + FAILURES=$((FAILURES + 1)) + echo -e "\tready ❌" + FAILURES=$((FAILURES + 1)) + else + echo -n -e "\texists ✔" + resourceReplicaCompare "${kind}" "${namespace}" "${currentResource}" "${simpleData}" + fi } # This function checks if the amount of replicas for a deployment, daemonset or statefulset are correct @@ -70,59 +72,61 @@ function testResourceExistenceFast { # 3. name of resource # 4. jsonData function resourceReplicaCompare() { - kind="${1}" - namespace="${2}" - resourceName="${3}" - simpleData="${4}" - retriesLeft="${RETRY_COUNT}" - while [[ "${retriesLeft}" -gt 0 ]]; do - if [[ "${kind}" == "Deployment" || "${kind}" == "StatefulSet" ]]; then - activeResourceStatus=$(echo "${simpleData}" | - jq -r --arg name "${resourceName}" --arg kind "${kind}" '. | + kind="${1}" + namespace="${2}" + resourceName="${3}" + simpleData="${4}" + retriesLeft="${RETRY_COUNT}" + while [[ "${retriesLeft}" -gt 0 ]]; do + if [[ "${kind}" == "Deployment" || "${kind}" == "StatefulSet" ]]; then + activeResourceStatus=$(echo "${simpleData}" | + jq -r --arg name "${resourceName}" --arg kind "${kind}" '. | select(.kind==$kind and .name==$name) | .status') - desiredResourceStatus=$(echo "${simpleData}" | - jq -r --arg name "${resourceName}" --arg kind "${kind}" '. | + desiredResourceStatus=$(echo "${simpleData}" | + jq -r --arg name "${resourceName}" --arg kind "${kind}" '. 
| select(.kind==$kind and .name==$name) | .replicas') - # JSON data structure for daemonsets is different from deployments and statefulsets, - # can not check amount of replicas in the exact same way - elif [[ "${kind}" == "DaemonSet" ]]; then - activeResourceStatus=$(echo "${simpleData}" | - jq -r --arg name "${resourceName}" --arg kind "${kind}" '. | + # JSON data structure for daemonsets is different from deployments and statefulsets, + # can not check amount of replicas in the exact same way + elif [[ "${kind}" == "DaemonSet" ]]; then + activeResourceStatus=$(echo "${simpleData}" | + jq -r --arg name "${resourceName}" --arg kind "${kind}" '. | select(.kind==$kind and .name==$name) | .numberReady') - desiredResourceStatus=$(echo "${simpleData}" | - jq -r --arg name "${resourceName}" --arg kind "${kind}" '. | + desiredResourceStatus=$(echo "${simpleData}" | + jq -r --arg name "${resourceName}" --arg kind "${kind}" '. | select(.kind==$kind and .name==$name) | .desiredNumberScheduled') - fi - - if [[ "${activeResourceStatus}" == "${desiredResourceStatus}" ]]; then - echo -e "\tready ✔"; SUCCESSES=$((SUCCESSES+1)) - if [ "$LOGGING" == "--logging-enabled" ]; then - writeLog "${namespace}" "${resourceName}" "Pod" - writeLog "${namespace}" "${resourceName}" "${kind}" - writeEvent "${namespace}" "${resourceName}" "Pod" - fi - return - else - sleep "${RETRY_WAIT}" - retriesLeft=$((retriesLeft-1)) - # refresh jsonData - simpleData="$(getStatus "${kind}")" - fi - done + fi - echo -e "\tready ❌"; FAILURES=$((FAILURES+1)) - DEBUG_OUTPUT+=$(kubectl get "${kind}" -n "${namespace}" "${resourceName}" -o json) - if [ "$LOGGING" == "--logging-enabled" ]; then - writeLog "${namespace}" "${resourceName}" "Pod" - writeLog "${namespace}" "${resourceName}" "${kind}" - writeEvent "${namespace}" "${resourceName}" "Pod" + if [[ "${activeResourceStatus}" == "${desiredResourceStatus}" ]]; then + echo -e "\tready ✔" + SUCCESSES=$((SUCCESSES + 1)) + if [ "$LOGGING" == "--logging-enabled" ]; 
then + writeLog "${namespace}" "${resourceName}" "Pod" + writeLog "${namespace}" "${resourceName}" "${kind}" + writeEvent "${namespace}" "${resourceName}" "Pod" + fi + return + else + sleep "${RETRY_WAIT}" + retriesLeft=$((retriesLeft - 1)) + # refresh jsonData + simpleData="$(getStatus "${kind}")" fi + done + + echo -e "\tready ❌" + FAILURES=$((FAILURES + 1)) + DEBUG_OUTPUT+=$(kubectl get "${kind}" -n "${namespace}" "${resourceName}" -o json) + if [ "$LOGGING" == "--logging-enabled" ]; then + writeLog "${namespace}" "${resourceName}" "Pod" + writeLog "${namespace}" "${resourceName}" "${kind}" + writeEvent "${namespace}" "${resourceName}" "Pod" + fi } # This function is required for statefulsets with update strategy OnDelete @@ -131,22 +135,24 @@ function resourceReplicaCompare() { # 1. namespace # 2. name of statefulset function testStatefulsetStatusByPods { - REPLICAS=$(kubectl get statefulset -n "$1" "$2" -o jsonpath="{.status.replicas}") + REPLICAS=$(kubectl get statefulset -n "$1" "$2" -o jsonpath="{.status.replicas}") - for replica in $(seq 0 $((REPLICAS - 1))); do - POD_NAME=$2-$replica - if ! kubectl wait -n "$1" --for=condition=ready pod "$POD_NAME" --timeout=60s > /dev/null; then - echo -n -e "\tnot ready ❌"; FAILURES=$((FAILURES+1)) - DEBUG_OUTPUT+="$(kubectl get statefulset -n "$1" "$2" -o json)" - if [ "$LOGGING" == "--logging-enabled" ]; then - writeLog "${1}" "${2}" "Pod" - writeLog "${1}" "${2}" "${kind}" - writeEvent "${1}" "${2}" "Pod" - fi - return - fi - done - echo -n -e "\tready ✔"; SUCCESSES=$((SUCCESSES+1)) + for replica in $(seq 0 $((REPLICAS - 1))); do + POD_NAME=$2-$replica + if ! 
kubectl wait -n "$1" --for=condition=ready pod "$POD_NAME" --timeout=60s >/dev/null; then + echo -n -e "\tnot ready ❌" + FAILURES=$((FAILURES + 1)) + DEBUG_OUTPUT+="$(kubectl get statefulset -n "$1" "$2" -o json)" + if [ "$LOGGING" == "--logging-enabled" ]; then + writeLog "${1}" "${2}" "Pod" + writeLog "${1}" "${2}" "${kind}" + writeEvent "${1}" "${2}" "Pod" + fi + return + fi + done + echo -n -e "\tready ✔" + SUCCESSES=$((SUCCESSES + 1)) } # Args: @@ -154,32 +160,34 @@ function testStatefulsetStatusByPods { # 2. name of job # 3. Wait time for job to finish before marking failed function testJobStatus { - if kubectl wait --for=condition=complete --timeout="$3" -n "$1" job/"$2" > /dev/null; then - echo -n -e "\tcompleted ✔"; SUCCESSES=$((SUCCESSES+1)) - else - echo -n -e "\tnot completed ❌"; FAILURES=$((FAILURES+1)) - DEBUG_OUTPUT+=$(kubectl get -n "$1" job "$2" -o json) - fi - if [ "$LOGGING" == "--logging-enabled" ]; then - logJob "${1}" "${2}" - fi + if kubectl wait --for=condition=complete --timeout="$3" -n "$1" job/"$2" >/dev/null; then + echo -n -e "\tcompleted ✔" + SUCCESSES=$((SUCCESSES + 1)) + else + echo -n -e "\tnot completed ❌" + FAILURES=$((FAILURES + 1)) + DEBUG_OUTPUT+=$(kubectl get -n "$1" job "$2" -o json) + fi + if [ "$LOGGING" == "--logging-enabled" ]; then + logJob "${1}" "${2}" + fi } # Args: # 1. namespace # 2. name function logCronJob { - writeEvent "${1}" "${2}" "CronJob" - logJob "${1}" "${2}" + writeEvent "${1}" "${2}" "CronJob" + logJob "${1}" "${2}" } # Args: # 1. namespace # 2. name function logJob { - writeLog "${1}" "${2}" "Job" - writeEvent "${1}" "${2}" "Job" - writeEvent "${1}" "${2}" "Pod" + writeLog "${1}" "${2}" "Job" + writeEvent "${1}" "${2}" "Job" + writeEvent "${1}" "${2}" "Pod" } LOGSFOLDER="logs" @@ -191,26 +199,24 @@ EVENTSFOLDER="events" # 2. name # 3. 
kind function writeLog { - if [ -z "$PIPELINE" ] - then - return - fi + if [ -z "$PIPELINE" ]; then + return + fi - NAMESPACE=$1 - NAME=$2 - KIND=$3 - NAMES=$(kubectl get "$KIND" -n "$NAMESPACE" -o custom-columns=NAME:.metadata.name | grep "$NAME" | tail -n +1) - mapfile -t NAMESLIST <<< "$NAMES" + NAMESPACE=$1 + NAME=$2 + KIND=$3 + NAMES=$(kubectl get "$KIND" -n "$NAMESPACE" -o custom-columns=NAME:.metadata.name | grep "$NAME" | tail -n +1) + mapfile -t NAMESLIST <<<"$NAMES" - mkdir -p "./$LOGSFOLDER/$CLUSTER/$KIND/$NAMESPACE" - for NAME in "${NAMESLIST[@]}" - do - FILE="./$LOGSFOLDER/$CLUSTER/$KIND/$NAMESPACE/$NAME.log" - if [[ ! -f "$FILE" ]]; then - touch "$FILE" - kubectl -n "$NAMESPACE" logs "$KIND"/"$NAME" --all-containers=true > "$FILE" 2>&1 - fi - done + mkdir -p "./$LOGSFOLDER/$CLUSTER/$KIND/$NAMESPACE" + for NAME in "${NAMESLIST[@]}"; do + FILE="./$LOGSFOLDER/$CLUSTER/$KIND/$NAMESPACE/$NAME.log" + if [[ ! -f "$FILE" ]]; then + touch "$FILE" + kubectl -n "$NAMESPACE" logs "$KIND"/"$NAME" --all-containers=true >"$FILE" 2>&1 + fi + done } # This function writes events to file for specified @@ -219,28 +225,26 @@ function writeLog { # 2. name # 3. kind function writeEvent { - if [ -z "$PIPELINE" ] - then - return - fi + if [ -z "$PIPELINE" ]; then + return + fi - NAMESPACE=$1 - NAME=$2 - KIND=$3 - NAMES=$(kubectl get "$KIND" -n "$NAMESPACE" -o custom-columns=NAME:.metadata.name | grep "$NAME" | tail -n +1) - mapfile -t NAMESLIST <<< "$NAMES" + NAMESPACE=$1 + NAME=$2 + KIND=$3 + NAMES=$(kubectl get "$KIND" -n "$NAMESPACE" -o custom-columns=NAME:.metadata.name | grep "$NAME" | tail -n +1) + mapfile -t NAMESLIST <<<"$NAMES" - mkdir -p "./$EVENTSFOLDER/$CLUSTER/$KIND/$NAMESPACE" - for NAME in "${NAMESLIST[@]}" - do - FILE="./$EVENTSFOLDER/$CLUSTER/$KIND/$NAMESPACE/$NAME.event" - if [[ ! 
-f "$FILE" ]]; then - touch "$FILE" - DATA=$(kubectl get event -n "${NAMESPACE}" --field-selector involvedObject.kind="${KIND}",involvedObject.name="${NAME}" -o json) - MESSAGES=$(echo "${DATA}" | jq -r '.items | map(.message) | .[]') - echo "$MESSAGES" > "$FILE" - fi - done + mkdir -p "./$EVENTSFOLDER/$CLUSTER/$KIND/$NAMESPACE" + for NAME in "${NAMESLIST[@]}"; do + FILE="./$EVENTSFOLDER/$CLUSTER/$KIND/$NAMESPACE/$NAME.event" + if [[ ! -f "$FILE" ]]; then + touch "$FILE" + DATA=$(kubectl get event -n "${NAMESPACE}" --field-selector involvedObject.kind="${KIND}",involvedObject.name="${NAME}" -o json) + MESSAGES=$(echo "${DATA}" | jq -r '.items | map(.message) | .[]') + echo "$MESSAGES" >"$FILE" + fi + done } # Args: @@ -248,31 +252,34 @@ function writeEvent { # 2. url # 3. (optional) username and password, : function testEndpoint { - echo -e "Testing $1 endpoint" + echo -e "Testing $1 endpoint" - retries="${RETRY_COUNT}" - while [ ${retries} -gt 0 ]; do - args=( - --connect-timeout 20 - --max-time 60 - -ksIL - -o /dev/null - -X GET - -w "%{http_code}" - ) - [ -n "${3}" ] && args+=(-u "${3}") + retries="${RETRY_COUNT}" + while [ ${retries} -gt 0 ]; do + args=( + --connect-timeout 20 + --max-time 60 + -ksIL + -o /dev/null + -X GET + -w "%{http_code}" + ) + [ -n "${3}" ] && args+=(-u "${3}") - RES=$(curl "${args[@]}" "${2}") - [[ $RES == "200" || $RES == "401" ]] && break + RES=$(curl "${args[@]}" "${2}") + [[ $RES == "200" || $RES == "401" ]] && break - sleep "${RETRY_WAIT}" - retries=$((retries-1)) - done + sleep "${RETRY_WAIT}" + retries=$((retries - 1)) + done - if [[ $RES == "200" || $RES == "401" ]] - then echo "success ✔"; SUCCESSES=$((SUCCESSES+1)) - else echo "failure ❌"; FAILURES=$((FAILURES+1)) - fi + if [[ $RES == "200" || $RES == "401" ]]; then + echo "success ✔" + SUCCESSES=$((SUCCESSES + 1)) + else + echo "failure ❌" + FAILURES=$((FAILURES + 1)) + fi } # Args: @@ -280,28 +287,31 @@ function testEndpoint { # 2. url # 3. 
expected HTTP response code function testEndpointProtected { - echo -e "Testing if $1 endpoint is protected" + echo -e "Testing if $1 endpoint is protected" - retries="${RETRY_COUNT}" - while [ ${retries} -gt 0 ]; do - args=( - --connect-timeout 20 - --max-time 60 - -ksI - -o /dev/null - -X GET - -w "%{http_code}" - ) + retries="${RETRY_COUNT}" + while [ ${retries} -gt 0 ]; do + args=( + --connect-timeout 20 + --max-time 60 + -ksI + -o /dev/null + -X GET + -w "%{http_code}" + ) - RES=$(curl "${args[@]}" "${2}") - [[ $RES == "${3}" ]] && break + RES=$(curl "${args[@]}" "${2}") + [[ $RES == "${3}" ]] && break - sleep "${RETRY_WAIT}" - retries=$((retries-1)) - done + sleep "${RETRY_WAIT}" + retries=$((retries - 1)) + done - if [[ $RES == "${3}" ]] - then echo "success ✔"; SUCCESSES=$((SUCCESSES+1)) - else echo "failure ❌"; FAILURES=$((FAILURES+1)) - fi + if [[ $RES == "${3}" ]]; then + echo "success ✔" + SUCCESSES=$((SUCCESSES + 1)) + else + echo "failure ❌" + FAILURES=$((FAILURES + 1)) + fi } diff --git a/pipeline/test/services/prometheus-common.sh b/pipeline/test/services/prometheus-common.sh index a3019ceef1..9cb80a14b2 100755 --- a/pipeline/test/services/prometheus-common.sh +++ b/pipeline/test/services/prometheus-common.sh @@ -4,10 +4,10 @@ # Fetch the data set function getData() { - jsonData=$(curl --silent 'http://localhost:9090/api/v1/targets') - # Simplify the data by filtering out parts we do not need - echo "${jsonData}" | - jq '.data.activeTargets[] | + jsonData=$(curl --silent 'http://localhost:9090/api/v1/targets') + # Simplify the data by filtering out parts we do not need + echo "${jsonData}" | + jq '.data.activeTargets[] | {job: .scrapePool , health: .health, instance: .labels.instance}' } @@ -18,25 +18,24 @@ function getData() { # 2. target name # 2. 
expected target instances function check_target() { - data="${1}" - targetName="${2}" - desiredInstanceAmount="${3}" - - # Stores the value value of the "instance" key where the - # "job" key matches the value of the current target being tested - # The number of healthy instances - currentInstanceAmount=$(echo "${data}" | - jq -r --arg target "${targetName}" '. | + data="${1}" + targetName="${2}" + desiredInstanceAmount="${3}" + + # Stores the value value of the "instance" key where the + # "job" key matches the value of the current target being tested + # The number of healthy instances + currentInstanceAmount=$(echo "${data}" | + jq -r --arg target "${targetName}" '. | select(.job==$target and .health=="up") | .instance' | wc -w) - echo "${currentInstanceAmount}" - if [[ ${currentInstanceAmount} == "${desiredInstanceAmount}" ]]; - then - return 0 - else - return 1 - fi + echo "${currentInstanceAmount}" + if [[ ${currentInstanceAmount} == "${desiredInstanceAmount}" ]]; then + return 0 + else + return 1 + fi } # Check if the target is healthy and increment SUCCESSES or FAILURES accordingly @@ -45,75 +44,72 @@ function check_target() { # 2. target name # 2. 
expected target instances function test_target() { - data="${1}" - targetName="${2}" - desiredHealthy="${3}" - - if check_target "${data}" "${targetName}" "${desiredHealthy}" &> /dev/null - then - echo -e "${targetName}\t✔"; SUCCESSES=$((SUCCESSES+1)) - else - echo -e "${targetName}\t❌"; FAILURES=$((FAILURES+1)) - DEBUG_PROMETHEUS_TARGETS+=("${targetName}") - fi + data="${1}" + targetName="${2}" + desiredHealthy="${3}" + + if check_target "${data}" "${targetName}" "${desiredHealthy}" &>/dev/null; then + echo -e "${targetName}\t✔" + SUCCESSES=$((SUCCESSES + 1)) + else + echo -e "${targetName}\t❌" + FAILURES=$((FAILURES + 1)) + DEBUG_PROMETHEUS_TARGETS+=("${targetName}") + fi } function test_targets_retry() { - prometheusEndpoint="${1}" - shift - targets=("${@}") - - { - # Run port-forward instance as a background process - kubectl port-forward -n monitoring "${prometheusEndpoint}" 9090 & - PF_PID=$! - sleep 3 - } &> /dev/null - - # TODO: Why is this not working? - # trap 'kill "${PF_PID}"; wait "${PF_PID}" 2>/dev/null' RETURN - - echo -n "Checking targets up to 5 times to avoid flakes..." - for i in {1..5} - do - # Get data from prometheus - jsonData=$(getData) - - # Print progress - echo -n " ${i}" - - # Check all targets - # If there are failures we need to retry - failure=0 - for target in "${targets[@]}" - do - read -r -a arr <<< "${target}" - name="${arr[0]}" - instances="${arr[1]}" - if ! check_target "${jsonData}" "${name}" "${instances}" &> /dev/null - then - failure=1 - break - fi - done - - # If no failures, we are ready to move on - if [ ${failure} -eq 0 ] - then - break - fi - sleep 10 + prometheusEndpoint="${1}" + shift + targets=("${@}") + + { + # Run port-forward instance as a background process + kubectl port-forward -n monitoring "${prometheusEndpoint}" 9090 & + PF_PID=$! + sleep 3 + } &>/dev/null + + # TODO: Why is this not working? 
+ # trap 'kill "${PF_PID}"; wait "${PF_PID}" 2>/dev/null' RETURN + + echo -n "Checking targets up to 5 times to avoid flakes..." + for i in {1..5}; do + # Get data from prometheus + jsonData=$(getData) + + # Print progress + echo -n " ${i}" + + # Check all targets + # If there are failures we need to retry + failure=0 + for target in "${targets[@]}"; do + read -r -a arr <<<"${target}" + name="${arr[0]}" + instances="${arr[1]}" + if ! check_target "${jsonData}" "${name}" "${instances}" &>/dev/null; then + failure=1 + break + fi done - kill "${PF_PID}"; wait "${PF_PID}" 2>/dev/null - - echo -e "\nRunning tests..." - # Test all targets - for target in "${targets[@]}" - do - read -r -a arr <<< "${target}" - name="${arr[0]}" - instances="${arr[1]}" - test_target "${jsonData}" "${name}" "${instances}" - done + # If no failures, we are ready to move on + if [ ${failure} -eq 0 ]; then + break + fi + sleep 10 + done + + kill "${PF_PID}" + wait "${PF_PID}" 2>/dev/null + + echo -e "\nRunning tests..." 
+ # Test all targets + for target in "${targets[@]}"; do + read -r -a arr <<<"${target}" + name="${arr[0]}" + instances="${arr[1]}" + test_target "${jsonData}" "${name}" "${instances}" + done } diff --git a/pipeline/test/services/service-cluster/testCertManager.sh b/pipeline/test/services/service-cluster/testCertManager.sh index 8c62dd0047..11217d7d3c 100644 --- a/pipeline/test/services/service-cluster/testCertManager.sh +++ b/pipeline/test/services/service-cluster/testCertManager.sh @@ -6,213 +6,213 @@ INNER_SCRIPTS_PATH="$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" source "${INNER_SCRIPTS_PATH}/../funcs.sh" function sc_certmanager_check_help() { - printf "%s\n" "[Usage]: test sc cert-manager [ARGUMENT]" - printf "\t%-25s %s\n" "--cluster-issuers" "Check cluster issuers" - printf "\t%-25s %s\n" "--certificates" "Check cluster certificates" - printf "\t%-25s %s\n" "--challenges" "Check challenges" - printf "%s\n" "[NOTE] If no argument is specified, it will go over all of them." + printf "%s\n" "[Usage]: test sc cert-manager [ARGUMENT]" + printf "\t%-25s %s\n" "--cluster-issuers" "Check cluster issuers" + printf "\t%-25s %s\n" "--certificates" "Check cluster certificates" + printf "\t%-25s %s\n" "--challenges" "Check challenges" + printf "%s\n" "[NOTE] If no argument is specified, it will go over all of them." - exit 0 + exit 0 } function sc_cert_manager_checks() { - if [[ ${#} == 0 ]]; then - echo "Running all checks ..." - check_sc_certmanager_cluster_issuers - check_sc_certmanager_apps_certificates - check_sc_certmanager_challenges - return - fi - while [[ ${#} -gt 0 ]]; do - case ${1} in - --cluster-issuers) - check_sc_certmanager_cluster_issuers - ;; - --certificates) - check_sc_certmanager_apps_certificates - ;; - --challenges) - check_sc_certmanager_challenges - ;; - --help) - sc_certmanager_check_help - ;; - esac - shift - done + if [[ ${#} == 0 ]]; then + echo "Running all checks ..." 
+ check_sc_certmanager_cluster_issuers + check_sc_certmanager_apps_certificates + check_sc_certmanager_challenges + return + fi + while [[ ${#} -gt 0 ]]; do + case ${1} in + --cluster-issuers) + check_sc_certmanager_cluster_issuers + ;; + --certificates) + check_sc_certmanager_apps_certificates + ;; + --challenges) + check_sc_certmanager_challenges + ;; + --help) + sc_certmanager_check_help + ;; + esac + shift + done } function check_sc_certmanager_cluster_issuers() { - echo -ne "Checking cert manager cluster issuers ... " - no_error=true - debug_msg="" - - clusterIssuers=("letsencrypt-prod" "letsencrypt-staging") - - for clusterIssuer in "${clusterIssuers[@]}"; do - if kubectl get ClusterIssuer "$clusterIssuer" &>/dev/null; then - jsonData=$(kubectl get ClusterIssuer "$clusterIssuer" -ojson) - cluster_issuer_status=$(echo "$jsonData" | jq -r '.status.conditions[] | select(.type=="Ready") | .status') - if [[ "$cluster_issuer_status" == "True" ]]; then - IFS='-' read -ra data <<<"$clusterIssuer" - readarray custom_solvers < <(yq4 e -o=j -I=0 ".issuers.${data[0]}.${data[1]}.solvers[]" "${config['config_file_sc']}") - if ! [ ${#custom_solvers[@]} -eq 0 ]; then - for custom_solver in "${custom_solvers[@]}"; do - challenge_solver=$(echo "$custom_solver" | jq ". | del( .selector ) | keys[] ") - solver_exist=$(kubectl get ClusterIssuer "$clusterIssuer" -oyaml | yq4 e -o=j -I=0 ".spec.acme.solvers[].$challenge_solver") - if [[ $solver_exist == "null" ]]; then - no_error=false - debug_msg+="[ERROR] Missing custom solver : $challenge_solver for Cluster Issuer : $clusterIssuer\n" - fi - done - fi - - else - no_error=false - debug_msg+="[ERROR] ClusterIssuer $clusterIssuer is not ready\n" + echo -ne "Checking cert manager cluster issuers ... 
" + no_error=true + debug_msg="" + + clusterIssuers=("letsencrypt-prod" "letsencrypt-staging") + + for clusterIssuer in "${clusterIssuers[@]}"; do + if kubectl get ClusterIssuer "$clusterIssuer" &>/dev/null; then + jsonData=$(kubectl get ClusterIssuer "$clusterIssuer" -ojson) + cluster_issuer_status=$(echo "$jsonData" | jq -r '.status.conditions[] | select(.type=="Ready") | .status') + if [[ "$cluster_issuer_status" == "True" ]]; then + IFS='-' read -ra data <<<"$clusterIssuer" + readarray custom_solvers < <(yq4 e -o=j -I=0 ".issuers.${data[0]}.${data[1]}.solvers[]" "${config['config_file_sc']}") + if ! [ ${#custom_solvers[@]} -eq 0 ]; then + for custom_solver in "${custom_solvers[@]}"; do + challenge_solver=$(echo "$custom_solver" | jq ". | del( .selector ) | keys[] ") + solver_exist=$(kubectl get ClusterIssuer "$clusterIssuer" -oyaml | yq4 e -o=j -I=0 ".spec.acme.solvers[].$challenge_solver") + if [[ $solver_exist == "null" ]]; then + no_error=false + debug_msg+="[ERROR] Missing custom solver : $challenge_solver for Cluster Issuer : $clusterIssuer\n" fi - - else - no_error=false - debug_msg+="[ERROR] ClusterIssuer $clusterIssuer does not exist\n" + done fi - done - if $no_error; then - echo "success ✔" - echo -e "[DEBUG] All ClusterIssuer resources are present and ready, with correct solvers" + else + no_error=false + debug_msg+="[ERROR] ClusterIssuer $clusterIssuer is not ready\n" + fi + else - echo "failure ❌" - echo -e "$debug_msg" + no_error=false + debug_msg+="[ERROR] ClusterIssuer $clusterIssuer does not exist\n" fi + done + + if $no_error; then + echo "success ✔" + echo -e "[DEBUG] All ClusterIssuer resources are present and ready, with correct solvers" + else + echo "failure ❌" + echo -e "$debug_msg" + fi } function check_sc_certmanager_apps_certificates() { - echo -ne "Checking cert manager for Apps Certificates ... 
" - no_error=true - debug_msg="" - - certificates=( - "dex dex-tls" - "opensearch-system opensearch-admin" - "opensearch-system opensearch-ca" - "opensearch-system opensearch-dashboards-ingress-cert" - "opensearch-system opensearch-http" - "opensearch-system opensearch-ingress-cert" - "opensearch-system opensearch-transport" + echo -ne "Checking cert manager for Apps Certificates ... " + no_error=true + debug_msg="" + + certificates=( + "dex dex-tls" + "opensearch-system opensearch-admin" + "opensearch-system opensearch-ca" + "opensearch-system opensearch-dashboards-ingress-cert" + "opensearch-system opensearch-http" + "opensearch-system opensearch-ingress-cert" + "opensearch-system opensearch-transport" + ) + + enable_harbor=$(yq4 -e '.harbor.enabled' "${config['config_file_sc']}" 2>/dev/null) + enable_thanos=$(yq4 -e '.thanos.enabled' "${config['config_file_sc']}" 2>/dev/null) + enable_user_grafana=$(yq4 -e '.grafana.user.enabled' "${config['config_file_sc']}" 2>/dev/null) + + if "${enable_harbor}"; then + certificates+=( + "harbor harbor-core-cert" + "harbor harbor-core-ingress-cert" ) + fi - enable_harbor=$(yq4 -e '.harbor.enabled' "${config['config_file_sc']}" 2>/dev/null) - enable_thanos=$(yq4 -e '.thanos.enabled' "${config['config_file_sc']}" 2>/dev/null) - enable_user_grafana=$(yq4 -e '.grafana.user.enabled' "${config['config_file_sc']}" 2>/dev/null) - - if "${enable_harbor}"; then - certificates+=( - "harbor harbor-core-cert" - "harbor harbor-core-ingress-cert" - ) - fi - - if "${enable_user_grafana}"; then - certificates+=( - "monitoring ops-grafana-tls" - "monitoring user-grafana-tls" - ) - fi - - if "${enable_thanos}"; then - thanos_subdomain=$(yq4 -e '.thanos.receiver.subdomain' "${config['config_file_sc']}") - opsDomain=$(yq4 -e '.global.opsDomain' "${config['config_file_sc']}") - certificates+=( - "thanos $thanos_subdomain.$opsDomain-tls" - ) - fi + if "${enable_user_grafana}"; then + certificates+=( + "monitoring ops-grafana-tls" + "monitoring 
user-grafana-tls" + ) + fi - for cert in "${certificates[@]}"; do - read -r -a arr <<<"$cert" - namespace="${arr[0]}" - name="${arr[1]}" - if kubectl get "Certificate" -n "$namespace" "$name" &>/dev/null; then - certificate_data=$(kubectl get "Certificate" -n "$namespace" "$name" -ojson) - cert_renewal_time=$(echo "$certificate_data" | jq -r ".status.renewalTime") - cert_expiry_time=$(echo "$certificate_data" | jq -r ".status.notAfter") - cert_status=$(echo "$certificate_data" | jq -r ".status.conditions[] | select(.type==\"Ready\") | .status") - cert_status_message=$(echo "$certificate_data" | jq -r ".status.conditions[] | select(.type==\"Ready\") | .message") - if [[ "$cert_status" != "True" ]]; then - no_error=false - debug_msg+="[ERROR] $cert_status_message \n" - else - now_date=$(date +%s) - expiry_date=$(date -d "$cert_expiry_time" +%s) - renew_date=$(date -d "$cert_renewal_time" +%s) - ((expiry_diff = (expiry_date - now_date) / 86400)) - ((renew_diff = (renew_date - now_date) / 86400)) - if [[ $expiry_diff -lt 1 ]]; then - no_error=false - debug_msg+="[ERROR] $name will expire in less than $expiry_diff day(s)\n" - else - debug_msg+="[DEBUG] Certificate: $name is Ready, will expire in $expiry_diff day(s), will be renewed in $renew_diff day(s)\n" - fi - fi + if "${enable_thanos}"; then + thanos_subdomain=$(yq4 -e '.thanos.receiver.subdomain' "${config['config_file_sc']}") + opsDomain=$(yq4 -e '.global.opsDomain' "${config['config_file_sc']}") + certificates+=( + "thanos $thanos_subdomain.$opsDomain-tls" + ) + fi + + for cert in "${certificates[@]}"; do + read -r -a arr <<<"$cert" + namespace="${arr[0]}" + name="${arr[1]}" + if kubectl get "Certificate" -n "$namespace" "$name" &>/dev/null; then + certificate_data=$(kubectl get "Certificate" -n "$namespace" "$name" -ojson) + cert_renewal_time=$(echo "$certificate_data" | jq -r ".status.renewalTime") + cert_expiry_time=$(echo "$certificate_data" | jq -r ".status.notAfter") + cert_status=$(echo "$certificate_data" 
| jq -r ".status.conditions[] | select(.type==\"Ready\") | .status") + cert_status_message=$(echo "$certificate_data" | jq -r ".status.conditions[] | select(.type==\"Ready\") | .message") + if [[ "$cert_status" != "True" ]]; then + no_error=false + debug_msg+="[ERROR] $cert_status_message \n" + else + now_date=$(date +%s) + expiry_date=$(date -d "$cert_expiry_time" +%s) + renew_date=$(date -d "$cert_renewal_time" +%s) + ((expiry_diff = (expiry_date - now_date) / 86400)) + ((renew_diff = (renew_date - now_date) / 86400)) + if [[ $expiry_diff -lt 1 ]]; then + no_error=false + debug_msg+="[ERROR] $name will expire in less than $expiry_diff day(s)\n" else - no_error=false - debug_msg+="[ERROR]: Missing certificate : $name in namespace $namespace\n" + debug_msg+="[DEBUG] Certificate: $name is Ready, will expire in $expiry_diff day(s), will be renewed in $renew_diff day(s)\n" fi - done - - if $no_error; then - echo "success ✔" - echo -e "$debug_msg" + fi else - echo "failure ❌" - echo -e "$debug_msg" + no_error=false + debug_msg+="[ERROR]: Missing certificate : $name in namespace $namespace\n" fi + done + + if $no_error; then + echo "success ✔" + echo -e "$debug_msg" + else + echo "failure ❌" + echo -e "$debug_msg" + fi } function check_sc_certmanager_challenges() { - echo -ne "Checking cert manager Challenges ... " - no_error=true - debug_msg="" - - challenges_data=$(kubectl get challenges -A -ojson) - - readarray -t pending_challenges < <(jq -c '.items[] | select(.status.state=="pending")' <<<"$challenges_data") - - if ! 
[[ $(echo "$challenges_data" | jq '.items | length') -eq 0 ]]; then - if [[ ${#pending_challenges[@]} != 0 ]]; then - no_error=false - debug_msg+="[ERROR] There are some pending challenges\n" - for pending_challenge in "${pending_challenges[@]}"; do - challenge_name=$(echo "$pending_challenge" | jq -r ".metadata.name") - challenge_namespace=$(echo "$pending_challenge" | jq -r ".metadata.namespace") - pending_reason=$(echo "$pending_challenge" | jq -r ".status.reason") - debug_msg+="Challenge $challenge_name in the $challenge_namespace namespace is pending because : $pending_reason\n" - done - fi + echo -ne "Checking cert manager Challenges ... " + no_error=true + debug_msg="" + + challenges_data=$(kubectl get challenges -A -ojson) + + readarray -t pending_challenges < <(jq -c '.items[] | select(.status.state=="pending")' <<<"$challenges_data") + + if ! [[ $(echo "$challenges_data" | jq '.items | length') -eq 0 ]]; then + if [[ ${#pending_challenges[@]} != 0 ]]; then + no_error=false + debug_msg+="[ERROR] There are some pending challenges\n" + for pending_challenge in "${pending_challenges[@]}"; do + challenge_name=$(echo "$pending_challenge" | jq -r ".metadata.name") + challenge_namespace=$(echo "$pending_challenge" | jq -r ".metadata.namespace") + pending_reason=$(echo "$pending_challenge" | jq -r ".status.reason") + debug_msg+="Challenge $challenge_name in the $challenge_namespace namespace is pending because : $pending_reason\n" + done fi - - orders_data=$(kubectl get orders -A -ojson) - - if ! 
[[ $(echo "$orders_data" | jq '.items | length') -eq 0 ]]; then - readarray -t errored_orders < <(jq -c '.items[] | select(.status.state=="errored")' <<<"$orders_data") - - if [[ ${#errored_orders[@]} != 0 ]]; then - no_error=false - debug_msg+="[ERROR] There some errored orders\n" - for errored_order in "${errored_orders[@]}"; do - order_name=$(echo "$errored_order" | jq -r ".metadata.name") - order_namespace=$(echo "$errored_order" | jq -r ".metadata.namespace") - errored_reason=$(echo "$errored_order" | jq -r ".status.reason") - debug_msg+="Order $order_name in the $order_namespace namespace is errored because : $errored_reason\n" - done - fi - fi - - if $no_error; then - echo "success ✔" - echo -e "[DEBUG] There are no pending challenges, or errored orders" - else - echo "failure ❌" - echo -e "$debug_msg" + fi + + orders_data=$(kubectl get orders -A -ojson) + + if ! [[ $(echo "$orders_data" | jq '.items | length') -eq 0 ]]; then + readarray -t errored_orders < <(jq -c '.items[] | select(.status.state=="errored")' <<<"$orders_data") + + if [[ ${#errored_orders[@]} != 0 ]]; then + no_error=false + debug_msg+="[ERROR] There some errored orders\n" + for errored_order in "${errored_orders[@]}"; do + order_name=$(echo "$errored_order" | jq -r ".metadata.name") + order_namespace=$(echo "$errored_order" | jq -r ".metadata.namespace") + errored_reason=$(echo "$errored_order" | jq -r ".status.reason") + debug_msg+="Order $order_name in the $order_namespace namespace is errored because : $errored_reason\n" + done fi + fi + + if $no_error; then + echo "success ✔" + echo -e "[DEBUG] There are no pending challenges, or errored orders" + else + echo "failure ❌" + echo -e "$debug_msg" + fi } diff --git a/pipeline/test/services/service-cluster/testEndpoints.sh b/pipeline/test/services/service-cluster/testEndpoints.sh index dbe6da6127..b0e1b09f9f 100755 --- a/pipeline/test/services/service-cluster/testEndpoints.sh +++ b/pipeline/test/services/service-cluster/testEndpoints.sh @@ 
-27,14 +27,13 @@ testEndpoint OpenSearch "https://${opensearch_subdomain}.${ops_domain}/" testEndpoint OpenSearchDashboards "https://${opensearch_dashboards_subdomain}.${base_domain}/" if [ "$enable_harbor" == true ]; then - testEndpoint Harbor "https://${harbor_subdomain}.${base_domain}/" + testEndpoint Harbor "https://${harbor_subdomain}.${base_domain}/" fi testEndpoint Grafana "https://${grafana_ops_subdomain}.${ops_domain}/" -if [ "$enable_user_grafana" == "true" ] -then - testEndpoint Grafana-user "https://${grafana_subdomain}.${base_domain}/" +if [ "$enable_user_grafana" == "true" ]; then + testEndpoint Grafana-user "https://${grafana_subdomain}.${base_domain}/" fi echo @@ -43,13 +42,11 @@ echo "Testing endpoints protection" echo "=============================" if [ "$enable_harbor" == true ]; then - testEndpointProtected Harbor "https://${harbor_subdomain}.${base_domain}/api/v2.0/users" 401 + testEndpointProtected Harbor "https://${harbor_subdomain}.${base_domain}/api/v2.0/users" 401 fi - -if [ "$enable_user_grafana" == "true" ] -then - testEndpointProtected Grafana-user "https://${grafana_subdomain}.${base_domain}/admin/users" 302 +if [ "$enable_user_grafana" == "true" ]; then + testEndpointProtected Grafana-user "https://${grafana_subdomain}.${base_domain}/admin/users" 302 fi testEndpointProtected Grafana "https://${grafana_ops_subdomain}.${ops_domain}/" 302 @@ -59,5 +56,5 @@ testEndpointProtected OpenSearch "https://${opensearch_subdomain}.${ops_domain}/ testEndpointProtected OpenSearchDashboards "https://${opensearch_dashboards_subdomain}.${base_domain}/" 302 if [[ "${enable_thanos}" == "true" ]] && [[ "${enable_thanos_receiver}" == "true" ]]; then - testEndpointProtected ThanosReceiver "https://${thanos_subdomain}.${ops_domain}/" 401 + testEndpointProtected ThanosReceiver "https://${thanos_subdomain}.${ops_domain}/" 401 fi diff --git a/pipeline/test/services/service-cluster/testIngress.sh b/pipeline/test/services/service-cluster/testIngress.sh index 
8f307125c3..e05f67c917 100644 --- a/pipeline/test/services/service-cluster/testIngress.sh +++ b/pipeline/test/services/service-cluster/testIngress.sh @@ -6,69 +6,69 @@ INNER_SCRIPTS_PATH="$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" source "${INNER_SCRIPTS_PATH}/../funcs.sh" function sc_ingress_check_help() { - printf "%s\n" "[Usage]: test sc ingress [ARGUMENT]" - printf "\t%-25s %s\n" "--health" "Check Ingress Health" - printf "%s\n" "[NOTE] If no argument is specified, it will go over all of them." + printf "%s\n" "[Usage]: test sc ingress [ARGUMENT]" + printf "\t%-25s %s\n" "--health" "Check Ingress Health" + printf "%s\n" "[NOTE] If no argument is specified, it will go over all of them." - exit 0 + exit 0 } function sc_ingress_checks() { - if [[ ${#} == 0 ]]; then - echo "Running all checks ..." - check_sc_ingress_health - return - fi - while [[ ${#} -gt 0 ]]; do - case ${1} in - --health) - check_sc_ingress_health - ;; - --help) - sc_ingress_check_help - ;; - esac - shift - done + if [[ ${#} == 0 ]]; then + echo "Running all checks ..." + check_sc_ingress_health + return + fi + while [[ ${#} -gt 0 ]]; do + case ${1} in + --health) + check_sc_ingress_health + ;; + --help) + sc_ingress_check_help + ;; + esac + shift + done } function check_sc_ingress_health() { - echo -ne "Checking Ingress Nginx health ... " - no_error=true - debug_msg="" + echo -ne "Checking Ingress Nginx health ... 
" + no_error=true + debug_msg="" - desired_replicas=$(kubectl get daemonset -n ingress-nginx ingress-nginx-controller -ojson | jq ".status.desiredNumberScheduled | tonumber") - ready_replicas=$(kubectl get daemonset -n ingress-nginx ingress-nginx-controller -ojson | jq ".status.numberReady | tonumber") - has_proxy_protocol=$(kubectl get configmap -n ingress-nginx ingress-nginx-controller -oyaml | yq4 '.data.use-proxy-protocol') + desired_replicas=$(kubectl get daemonset -n ingress-nginx ingress-nginx-controller -ojson | jq ".status.desiredNumberScheduled | tonumber") + ready_replicas=$(kubectl get daemonset -n ingress-nginx ingress-nginx-controller -ojson | jq ".status.numberReady | tonumber") + has_proxy_protocol=$(kubectl get configmap -n ingress-nginx ingress-nginx-controller -oyaml | yq4 '.data.use-proxy-protocol') - diff=$((desired_replicas - ready_replicas)) - if "${has_proxy_protocol}"; then - debug_msg+="[DEBUG] unable to test ingress with proxy protocol\n" - echo "skipping -" - echo -ne "$debug_msg" - return - elif [[ $desired_replicas -eq $ready_replicas ]]; then - read -r -a pods <<<"$(kubectl get pods -n ingress-nginx -ojson | jq -r '.items[].metadata.name' | tr '\n' ' ')" - for pod in "${pods[@]}"; do - if [[ "$pod" =~ ingress-nginx-controller* ]]; then - # shellcheck disable=SC2086 - res=$(kubectl -n ingress-nginx exec -it "$pod" -- wget --spider -S --tries=4 --no-check-certificate https://localhost/healthz 2>&1 | grep "HTTP/" | awk '{print $2}') - if [[ "$res" != "200" ]]; then - no_error=false - debug_msg+="[ERROR] The following nginx pod $pod is not healthy\n" - fi - fi - done - else - no_error=false - debug_msg+="[ERROR] $diff out of $desired_replicas of ingress-nginx-controller pods are not ready\n" - fi + diff=$((desired_replicas - ready_replicas)) + if "${has_proxy_protocol}"; then + debug_msg+="[DEBUG] unable to test ingress with proxy protocol\n" + echo "skipping -" + echo -ne "$debug_msg" + return + elif [[ $desired_replicas -eq 
$ready_replicas ]]; then + read -r -a pods <<<"$(kubectl get pods -n ingress-nginx -ojson | jq -r '.items[].metadata.name' | tr '\n' ' ')" + for pod in "${pods[@]}"; do + if [[ "$pod" =~ ingress-nginx-controller* ]]; then + # shellcheck disable=SC2086 + res=$(kubectl -n ingress-nginx exec -it "$pod" -- wget --spider -S --tries=4 --no-check-certificate https://localhost/healthz 2>&1 | grep "HTTP/" | awk '{print $2}') + if [[ "$res" != "200" ]]; then + no_error=false + debug_msg+="[ERROR] The following nginx pod $pod is not healthy\n" + fi + fi + done + else + no_error=false + debug_msg+="[ERROR] $diff out of $desired_replicas of ingress-nginx-controller pods are not ready\n" + fi - if $no_error; then - echo "success ✔" - echo -e "[DEBUG] All nginx ingress pods are ready & healthy." - else - echo "failure ❌" - echo -e "$debug_msg" - fi + if $no_error; then + echo "success ✔" + echo -e "[DEBUG] All nginx ingress pods are ready & healthy." + else + echo "failure ❌" + echo -e "$debug_msg" + fi } diff --git a/pipeline/test/services/service-cluster/testOpensearch.sh b/pipeline/test/services/service-cluster/testOpensearch.sh index d0d85a5334..61b6babc25 100644 --- a/pipeline/test/services/service-cluster/testOpensearch.sh +++ b/pipeline/test/services/service-cluster/testOpensearch.sh @@ -12,436 +12,435 @@ opsDomain=$(yq4 '.global.opsDomain' common-config.yaml) popd || exit function opensearch_check_help() { - printf "%s\n" "[Usage]: test sc opensearch [ARGUMENT]" - printf "\t%-25s %s\n" "--cluster-health" "Get cluster health" - printf "\t%-25s %s\n" "--snapshot-status" "Check snapshot status" - printf "\t%-25s %s\n" "--breakers" "Check if circuit breakers have been triggered" - printf "\t%-25s %s\n" "--indices" "Check if there are any missing indices" - printf "\t%-25s %s\n" "--aliases" "Check if each aliases has a write index" - printf "\t%-25s %s\n" "--mappings" "Check mappings/fields count & limit" - printf "\t%-25s %s\n" "--user-roles" "Check configured user roles" - 
printf "\t%-25s %s\n" "--ism" "Check ISM" - printf "\t%-25s %s\n" "--object-store-access" "Check object store access" - printf "\t%-25s %s\n" "--fluentd" "Check that fluentd can connect to opensearch" - printf "%s\n" "[NOTE] If no argument is specified, it will go over all of them." - - exit 0 + printf "%s\n" "[Usage]: test sc opensearch [ARGUMENT]" + printf "\t%-25s %s\n" "--cluster-health" "Get cluster health" + printf "\t%-25s %s\n" "--snapshot-status" "Check snapshot status" + printf "\t%-25s %s\n" "--breakers" "Check if circuit breakers have been triggered" + printf "\t%-25s %s\n" "--indices" "Check if there are any missing indices" + printf "\t%-25s %s\n" "--aliases" "Check if each aliases has a write index" + printf "\t%-25s %s\n" "--mappings" "Check mappings/fields count & limit" + printf "\t%-25s %s\n" "--user-roles" "Check configured user roles" + printf "\t%-25s %s\n" "--ism" "Check ISM" + printf "\t%-25s %s\n" "--object-store-access" "Check object store access" + printf "\t%-25s %s\n" "--fluentd" "Check that fluentd can connect to opensearch" + printf "%s\n" "[NOTE] If no argument is specified, it will go over all of them." 
+ + exit 0 } function sc_opensearch_checks() { - if [[ ${#} == 0 ]]; then - check_opensearch_cluster_health - check_opensearch_snapshots_status - check_opensearch_breakers - check_opensearch_indices - check_opensearch_aliases - check_opensearch_mappings - check_opensearch_user_roles - check_opensearch_ism - check_object_store_access - check_fluentd_connection - return - fi - while [[ ${#} -gt 0 ]]; do - case ${1} in - --cluster-health) - check_opensearch_cluster_health - ;; - --snapshot-status) - check_opensearch_snapshots_status - ;; - --breakers) - check_opensearch_breakers - ;; - --indices) - check_opensearch_indices - ;; - --aliases) - check_opensearch_aliases - ;; - --mappings) - check_opensearch_mappings - ;; - --user-roles) - check_opensearch_user_roles - ;; - --ism) - check_opensearch_ism - ;; - --object-store-access) - check_object_store_access - ;; - --fluentd) - check_fluentd_connection - ;; - --help) - opensearch_check_help - ;; - esac - shift - done + if [[ ${#} == 0 ]]; then + check_opensearch_cluster_health + check_opensearch_snapshots_status + check_opensearch_breakers + check_opensearch_indices + check_opensearch_aliases + check_opensearch_mappings + check_opensearch_user_roles + check_opensearch_ism + check_object_store_access + check_fluentd_connection + return + fi + while [[ ${#} -gt 0 ]]; do + case ${1} in + --cluster-health) + check_opensearch_cluster_health + ;; + --snapshot-status) + check_opensearch_snapshots_status + ;; + --breakers) + check_opensearch_breakers + ;; + --indices) + check_opensearch_indices + ;; + --aliases) + check_opensearch_aliases + ;; + --mappings) + check_opensearch_mappings + ;; + --user-roles) + check_opensearch_user_roles + ;; + --ism) + check_opensearch_ism + ;; + --object-store-access) + check_object_store_access + ;; + --fluentd) + check_fluentd_connection + ;; + --help) + opensearch_check_help + ;; + esac + shift + done } check_opensearch_cluster_health() { - echo -ne "Checking if opensearch cluster is healthy 
... " - cluster_health=$(curl -sk -u admin:"${adminPassword}" -X GET "https://opensearch.${opsDomain}/_cluster/health") - status=$(echo "$cluster_health" | jq -r '.status') - if [[ $status != "green" ]]; then - echo -e "failure ❌" - echo "$cluster_health" | jq - else - echo -e "success ✔" - fi + echo -ne "Checking if opensearch cluster is healthy ... " + cluster_health=$(curl -sk -u admin:"${adminPassword}" -X GET "https://opensearch.${opsDomain}/_cluster/health") + status=$(echo "$cluster_health" | jq -r '.status') + if [[ $status != "green" ]]; then + echo -e "failure ❌" + echo "$cluster_health" | jq + else + echo -e "success ✔" + fi } check_opensearch_snapshots_status() { - echo -ne "Checking opensearch snapshots status ... " - no_error=true - debug_msg="" - repo_name=$(yq4 -e '.opensearch.snapshot.repository' "${config['config_file_sc']}") - repo_exists_status=$(curl -sk -u admin:"${adminPassword}" -X GET "https://opensearch.${opsDomain}/_snapshot/${repo_name}" | jq "select(.error)") - if [[ -z "$repo_exists_status" ]]; then - if kubectl get "cronjob" -n "opensearch-system" "opensearch-backup" &>/dev/null; then - snapshots=$(curl -sk -u admin:"${adminPassword}" -X GET "https://opensearch.${opsDomain}/_cat/snapshots/${repo_name}") - error=$(echo "$snapshots" | jq '.error' 2>/dev/null || true) - failed=$(echo "$snapshots" | grep 'FAILED' || true) - partial=$(echo "$snapshots" | grep 'PARTIAL' || true) - - if [[ "$error" != "" ]] && [[ "$error" != "null" ]]; then - no_error=false - debug_msg+="[ERROR] Error in snapshots output: \n $error\n" - else - if [[ "$failed" != "" ]]; then - no_error=false - debug_msg+="[ERROR] We found some failed snapshots: \n $failed\n" - fi - - if [[ "$partial" != "" ]]; then - no_error=false - debug_msg+="[WARNING] We found some partial snapshots: \n $partial\n" - fi - - IFS=$'\n' readarray -t data < <(awk '{ print $1 " " $2 " " $3}' <<<"$snapshots") - IFS=" " read -ra last_snapshot <<<"${data[-1]}" - - - if [[ "${#last_snapshot[@]}" 
-gt 0 ]]; then - now_epoch=$(date +%s) - last_snapshot_epoch=${last_snapshot[2]} - ((diff = now_epoch - last_snapshot_epoch)) - - if [[ $diff -gt 86400 ]]; then - no_error=false - debug_msg+="[ERROR] The latest snapshot has not been created within the past 24 hours, with status: ${last_snapshot[1]}\n" - else - debug_msg+="[WARNING] The latest snapshot has been created within the past 24 hours, with status: ${last_snapshot[1]}\n" - fi - else - no_error=false - debug_msg+="[ERROR] No snapshots found, if this is a brand new cluster this can safely be ignored\n" - fi - fi - else - no_error=false - debug_msg+="[ERROR] opensearch-backup cronjob doesn't exist\n" - fi - else + echo -ne "Checking opensearch snapshots status ... " + no_error=true + debug_msg="" + repo_name=$(yq4 -e '.opensearch.snapshot.repository' "${config['config_file_sc']}") + repo_exists_status=$(curl -sk -u admin:"${adminPassword}" -X GET "https://opensearch.${opsDomain}/_snapshot/${repo_name}" | jq "select(.error)") + if [[ -z "$repo_exists_status" ]]; then + if kubectl get "cronjob" -n "opensearch-system" "opensearch-backup" &>/dev/null; then + snapshots=$(curl -sk -u admin:"${adminPassword}" -X GET "https://opensearch.${opsDomain}/_cat/snapshots/${repo_name}") + error=$(echo "$snapshots" | jq '.error' 2>/dev/null || true) + failed=$(echo "$snapshots" | grep 'FAILED' || true) + partial=$(echo "$snapshots" | grep 'PARTIAL' || true) + + if [[ "$error" != "" ]] && [[ "$error" != "null" ]]; then no_error=false - debug_msg=$(echo -e "$repo_exists_status" | jq) - fi + debug_msg+="[ERROR] Error in snapshots output: \n $error\n" + else + if [[ "$failed" != "" ]]; then + no_error=false + debug_msg+="[ERROR] We found some failed snapshots: \n $failed\n" + fi - if $no_error; then - echo "success ✔" - echo "[DEBUG] All snapshots are either completed or in progress" + if [[ "$partial" != "" ]]; then + no_error=false + debug_msg+="[WARNING] We found some partial snapshots: \n $partial\n" + fi + + IFS=$'\n' 
readarray -t data < <(awk '{ print $1 " " $2 " " $3}' <<<"$snapshots") + IFS=" " read -ra last_snapshot <<<"${data[-1]}" + + if [[ "${#last_snapshot[@]}" -gt 0 ]]; then + now_epoch=$(date +%s) + last_snapshot_epoch=${last_snapshot[2]} + ((diff = now_epoch - last_snapshot_epoch)) + + if [[ $diff -gt 86400 ]]; then + no_error=false + debug_msg+="[ERROR] The latest snapshot has not been created within the past 24 hours, with status: ${last_snapshot[1]}\n" + else + debug_msg+="[WARNING] The latest snapshot has been created within the past 24 hours, with status: ${last_snapshot[1]}\n" + fi + else + no_error=false + debug_msg+="[ERROR] No snapshots found, if this is a brand new cluster this can safely be ignored\n" + fi + fi else - echo "failure ❌" - echo -e "$debug_msg" + no_error=false + debug_msg+="[ERROR] opensearch-backup cronjob doesn't exist\n" fi + else + no_error=false + debug_msg=$(echo -e "$repo_exists_status" | jq) + fi + + if $no_error; then + echo "success ✔" + echo "[DEBUG] All snapshots are either completed or in progress" + else + echo "failure ❌" + echo -e "$debug_msg" + fi } check_opensearch_indices() { - echo -ne "Checking opensearch indices status ... " - debug_msg="" - no_error=true - - for index in 'other' 'kubernetes' 'kubeaudit' 'authlog'; do - res=$(curl -w "%{http_code}" -o /dev/null -ksIL -u admin:"${adminPassword}" -X HEAD "https://opensearch.${opsDomain}/${index}") - if [[ $res != "200" ]]; then - debug_msg+="[ERROR] Missing index : ${index}\n" - no_error=false - fi - done + echo -ne "Checking opensearch indices status ... 
" + debug_msg="" + no_error=true - if $no_error; then - echo "success ✔" - echo -e "[DEBUG] All indices are present" - else - echo "failure ❌" - echo -e "$debug_msg" + for index in 'other' 'kubernetes' 'kubeaudit' 'authlog'; do + res=$(curl -w "%{http_code}" -o /dev/null -ksIL -u admin:"${adminPassword}" -X HEAD "https://opensearch.${opsDomain}/${index}") + if [[ $res != "200" ]]; then + debug_msg+="[ERROR] Missing index : ${index}\n" + no_error=false fi + done + + if $no_error; then + echo "success ✔" + echo -e "[DEBUG] All indices are present" + else + echo "failure ❌" + echo -e "$debug_msg" + fi } check_opensearch_breakers() { - echo -ne "Checking opensearch breakers ... " - breakers_data=$(curl -sk -u admin:"${adminPassword}" -X GET "https://opensearch.${opsDomain}/_nodes/_all/stats/breaker") - no_error=true - debug_msg="" - nodes_data=$(echo "$breakers_data" | jq ".nodes") - readarray -t nodes < <(jq -c 'to_entries[]' <<<"$nodes_data") - - for node in "${nodes[@]}"; do - node_name=$(jq '.key' <<<"$node") - readarray -t breakers < <(jq -c '.value.breakers | to_entries[]' <<<"$node") - for breaker in "${breakers[@]}"; do - tripped=$(jq ".value.tripped" <<<"$breaker") - name=$(jq ".key" <<<"$breaker") - if [[ $tripped == "1" ]]; then - no_error=false - debug_msg+="[DEBUG] A circuit breaker : $name has been triggered for node: $node_name\n" - fi - done + echo -ne "Checking opensearch breakers ... 
" + breakers_data=$(curl -sk -u admin:"${adminPassword}" -X GET "https://opensearch.${opsDomain}/_nodes/_all/stats/breaker") + no_error=true + debug_msg="" + nodes_data=$(echo "$breakers_data" | jq ".nodes") + readarray -t nodes < <(jq -c 'to_entries[]' <<<"$nodes_data") + + for node in "${nodes[@]}"; do + node_name=$(jq '.key' <<<"$node") + readarray -t breakers < <(jq -c '.value.breakers | to_entries[]' <<<"$node") + for breaker in "${breakers[@]}"; do + tripped=$(jq ".value.tripped" <<<"$breaker") + name=$(jq ".key" <<<"$breaker") + if [[ $tripped == "1" ]]; then + no_error=false + debug_msg+="[DEBUG] A circuit breaker : $name has been triggered for node: $node_name\n" + fi done + done - if $no_error; then - echo "success ✔" - echo "[DEBUG] None of the circuit breakers has been triggered" - else - echo "failure ❌" - echo -e "$debug_msg" - fi + if $no_error; then + echo "success ✔" + echo "[DEBUG] None of the circuit breakers has been triggered" + else + echo "failure ❌" + echo -e "$debug_msg" + fi } check_opensearch_aliases() { - echo -ne "Checking opensearch aliases indices mapping ... " - no_error=true - debug_msg="" - - curl -sk -o /tmp/response -u admin:"${adminPassword}" -X GET "https://opensearch.${opsDomain}/_cat/aliases" + echo -ne "Checking opensearch aliases indices mapping ... " + no_error=true + debug_msg="" - aliases=$(awk '{print $1}' /dev/null 2>&1 - echo $? - ) - if [[ ${command} -eq $EX_OK ]]; then - debug_msg="[DEBUG] Snapshot bucket ${snapshot_bucket} exist and can be accessed" - elif [[ ${command} -eq $EX_NOTFOUND ]]; then - no_error=false - debug_msg="[ERROR] S3 error: 404 (NoSuchBucket): The specified bucket ${snapshot_bucket} does not exist." 
- elif [[ ${command} -eq $EX_ACCESSDENIED ]]; then
- no_error=false
- debug_msg="[ERROR] S3 error: Insufficient permissions to perform the operation on S3"
- elif [[ ${command} -eq $EX_CONFIG ]]; then
- no_error=false
- debug_msg="[ERROR] S3 error: Configuration file error"
- else
- no_error=false
- debug_msg="[ERROR] An error happened, please try again"
- fi
- fi
-
- if $no_error; then
- echo "success ✔"
- echo -e "$debug_msg"
+ echo -ne "Checking opensearch snapshot bucket access... "
+ no_error=true
+ debug_msg=""
+ EX_NOTFOUND=12
+ EX_OK=0
+ EX_ACCESSDENIED=77
+ EX_CONFIG=78
+ snapshot_bucket=$(yq4 -e '.objectStorage.buckets.opensearch' "${config['config_file_sc']}")
+
+ if [ ! -f "${CK8S_CONFIG_PATH}/.state/s3cfg.ini" ]; then
+ no_error=false
+ debug_msg="[SKIP] S3 configuration file missing"
+ else
+ command=$(
+ s3cmd --config <(sops -d "${CK8S_CONFIG_PATH}"/.state/s3cfg.ini) ls s3://"${snapshot_bucket}" >/dev/null 2>&1
+ echo $?
+ )
+ if [[ ${command} -eq $EX_OK ]]; then
+ debug_msg="[DEBUG] Snapshot bucket ${snapshot_bucket} exists and can be accessed"
+ elif [[ ${command} -eq $EX_NOTFOUND ]]; then
+ no_error=false
+ debug_msg="[ERROR] S3 error: 404 (NoSuchBucket): The specified bucket ${snapshot_bucket} does not exist."
+ elif [[ ${command} -eq $EX_ACCESSDENIED ]]; then
+ no_error=false
+ debug_msg="[ERROR] S3 error: Insufficient permissions to perform the operation on S3"
+ elif [[ ${command} -eq $EX_CONFIG ]]; then
+ no_error=false
+ debug_msg="[ERROR] S3 error: Configuration file error"
+ else
- echo "failure ❌"
- echo -e "$debug_msg"
+ no_error=false
+ debug_msg="[ERROR] An error happened, please try again"
 fi
+ fi
+
+ if $no_error; then
+ echo "success ✔"
+ echo -e "$debug_msg"
+ else
+ echo "failure ❌"
+ echo -e "$debug_msg"
+ fi
 }

check_fluentd_connection() {
- echo -ne "Checking if fluentd can connect to opensearch... 
" - no_error=true - debug_msg="" - - res=$(curl -w "%{http_code}" -o /dev/null -ksIL -u fluentd:"${fluentdPassword}" -X HEAD "https://opensearch.${opsDomain}/") - if [[ $res != "200" ]]; then - debug_msg+="[ERROR] $res : Fluentd cannot connect to opensearch.\nPlease check your credentials" - no_error=false - fi - - if $no_error; then - echo "success ✔" - echo -e "[DEBUG] Fluentd is able to connect to Opensearch" - else - echo "failure ❌" - echo -e "$debug_msg" - fi + echo -ne "Checking if fluentd can connect to opensearch... " + no_error=true + debug_msg="" + + res=$(curl -w "%{http_code}" -o /dev/null -ksIL -u fluentd:"${fluentdPassword}" -X HEAD "https://opensearch.${opsDomain}/") + if [[ $res != "200" ]]; then + debug_msg+="[ERROR] $res : Fluentd cannot connect to opensearch.\nPlease check your credentials" + no_error=false + fi + + if $no_error; then + echo "success ✔" + echo -e "[DEBUG] Fluentd is able to connect to Opensearch" + else + echo "failure ❌" + echo -e "$debug_msg" + fi } diff --git a/pipeline/test/services/service-cluster/testPodsReady.sh b/pipeline/test/services/service-cluster/testPodsReady.sh index b422c6744b..fa9ecc279e 100755 --- a/pipeline/test/services/service-cluster/testPodsReady.sh +++ b/pipeline/test/services/service-cluster/testPodsReady.sh @@ -28,59 +28,59 @@ echo "Testing deployments" echo "===================" deployments=( - "dex dex" - "cert-manager cert-manager" - "cert-manager cert-manager-cainjector" - "cert-manager cert-manager-webhook" - "kube-system coredns" - "kube-system metrics-server" - "ingress-nginx ingress-nginx-default-backend" - "monitoring kube-prometheus-stack-operator" - "monitoring ops-grafana" - "monitoring kube-prometheus-stack-kube-state-metrics" - "monitoring prometheus-blackbox-exporter" - "opensearch-system prometheus-opensearch-exporter" - "opensearch-system opensearch-dashboards" + "dex dex" + "cert-manager cert-manager" + "cert-manager cert-manager-cainjector" + "cert-manager cert-manager-webhook" + 
"kube-system coredns" + "kube-system metrics-server" + "ingress-nginx ingress-nginx-default-backend" + "monitoring kube-prometheus-stack-operator" + "monitoring ops-grafana" + "monitoring kube-prometheus-stack-kube-state-metrics" + "monitoring prometheus-blackbox-exporter" + "opensearch-system prometheus-opensearch-exporter" + "opensearch-system opensearch-dashboards" ) if "${enable_harbor}"; then - deployments+=( - "harbor harbor-core" - "harbor harbor-jobservice" - "harbor harbor-portal" - "harbor harbor-registry" - ) + deployments+=( + "harbor harbor-core" + "harbor harbor-jobservice" + "harbor harbor-portal" + "harbor harbor-registry" + ) fi if "${enable_user_grafana}"; then - deployments+=("monitoring user-grafana") + deployments+=("monitoring user-grafana") fi if "${enable_velero}"; then - deployments+=("velero velero") + deployments+=("velero velero") fi if "${enable_falco}" && "${enable_falco_alerts}"; then - deployments+=("falco falco-falcosidekick") + deployments+=("falco falco-falcosidekick") fi if [[ "${enable_thanos}" == "true" ]] && [[ "${enable_thanos_receiver}" == "true" ]]; then - deployments+=( - "thanos thanos-receiver-bucketweb" - "thanos thanos-receiver-compactor" - "thanos thanos-receiver-receive-distributor" - ) + deployments+=( + "thanos thanos-receiver-bucketweb" + "thanos thanos-receiver-compactor" + "thanos thanos-receiver-receive-distributor" + ) fi if [[ "${enable_thanos}" == "true" ]] && [[ "${enable_thanos_query}" == "true" ]]; then - deployments+=( - "thanos thanos-query-query" - "thanos thanos-query-query-frontend" - ) + deployments+=( + "thanos thanos-query-query" + "thanos thanos-query-query-frontend" + ) fi resourceKind="Deployment" # Get json data in a smaller dataset simpleData="$(getStatus $resourceKind)" for deployment in "${deployments[@]}"; do - read -r -a arr <<< "$deployment" - namespace="${arr[0]}" - name="${arr[1]}" - testResourceExistenceFast "${resourceKind}" "${namespace}" "${name}" "${simpleData}" + read -r -a arr 
<<<"$deployment" + namespace="${arr[0]}" + name="${arr[1]}" + testResourceExistenceFast "${resourceKind}" "${namespace}" "${name}" "${simpleData}" done echo @@ -89,30 +89,30 @@ echo "Testing daemonsets" echo "==================" daemonsets=( - "kube-system node-local-dns" - "ingress-nginx ingress-nginx-controller" - "monitoring kube-prometheus-stack-prometheus-node-exporter" + "kube-system node-local-dns" + "ingress-nginx ingress-nginx-controller" + "monitoring kube-prometheus-stack-prometheus-node-exporter" ) if "${enable_fluentd}"; then - daemonsets+=("fluentd-system fluentd-forwarder") + daemonsets+=("fluentd-system fluentd-forwarder") fi if "$enable_velero"; then - daemonsets+=("velero node-agent") + daemonsets+=("velero node-agent") fi if "${enable_kured}"; then daemonsets+=("kured kured") fi if "${enable_falco}"; then - daemonsets+=("falco falco") + daemonsets+=("falco falco") fi resourceKind="DaemonSet" # Get json data in a smaller dataset simpleData="$(getStatus $resourceKind)" for daemonset in "${daemonsets[@]}"; do - read -r -a arr <<< "$daemonset" - namespace="${arr[0]}" - name="${arr[1]}" - testResourceExistenceFast ${resourceKind} "${namespace}" "${name}" "${simpleData}" + read -r -a arr <<<"$daemonset" + namespace="${arr[0]}" + name="${arr[1]}" + testResourceExistenceFast ${resourceKind} "${namespace}" "${name}" "${simpleData}" done echo @@ -121,53 +121,53 @@ echo "Testing statefulsets" echo "====================" statefulsets=( - "monitoring prometheus-kube-prometheus-stack-prometheus" - "monitoring alertmanager-kube-prometheus-stack-alertmanager" - "opensearch-system opensearch-master" + "monitoring prometheus-kube-prometheus-stack-prometheus" + "monitoring alertmanager-kube-prometheus-stack-alertmanager" + "opensearch-system opensearch-master" ) if "${enable_os_data_sts}"; then - statefulsets+=("opensearch-system opensearch-data") + statefulsets+=("opensearch-system opensearch-data") fi if "${enable_os_client_sts}"; then - 
statefulsets+=("opensearch-system opensearch-client") + statefulsets+=("opensearch-system opensearch-client") fi if "${enable_harbor}"; then - statefulsets+=( - "harbor harbor-database" - "harbor harbor-redis" - "harbor harbor-trivy" - ) + statefulsets+=( + "harbor harbor-database" + "harbor harbor-redis" + "harbor harbor-trivy" + ) fi if "${enable_fluentd}"; then - statefulsets+=("fluentd-system fluentd-aggregator") + statefulsets+=("fluentd-system fluentd-aggregator") fi if [[ "${enable_thanos}" == "true" ]] && [[ "${enable_thanos_receiver}" == "true" ]]; then - statefulsets+=( - "thanos thanos-receiver-receive" - "thanos thanos-receiver-storegateway" - ) + statefulsets+=( + "thanos thanos-receiver-receive" + "thanos thanos-receiver-storegateway" + ) fi if [[ "${enable_thanos}" == "true" ]] && [[ "${enable_thanos_ruler}" == "true" ]]; then - statefulsets+=("thanos thanos-receiver-ruler") + statefulsets+=("thanos thanos-receiver-ruler") fi resourceKind="StatefulSet" # Get json data in a smaller dataset simpleData="$(getStatus $resourceKind)" for statefulset in "${statefulsets[@]}"; do - read -r -a arr <<< "$statefulset" - namespace="${arr[0]}" - name="${arr[1]}" - testResourceExistenceFast ${resourceKind} "${namespace}" "${name}" "${simpleData}" + read -r -a arr <<<"$statefulset" + namespace="${arr[0]}" + name="${arr[1]}" + testResourceExistenceFast ${resourceKind} "${namespace}" "${name}" "${simpleData}" done # Format: # namespace job-name timeout jobs=( - "opensearch-system opensearch-configurer 120s" + "opensearch-system opensearch-configurer 120s" ) if "${enable_harbor}"; then - jobs+=("harbor init-harbor-job 120s") + jobs+=("harbor init-harbor-job 120s") fi echo @@ -176,14 +176,14 @@ echo "Testing jobs" echo "====================" for job in "${jobs[@]}"; do - read -r -a arr <<< "$job" - namespace="${arr[0]}" - name="${arr[1]}" - timeout="${arr[2]}" - echo -n -e "\n${name}\t" - if testResourceExistence job "${namespace}" "${name}"; then - testJobStatus 
"${namespace}" "${name}" "${timeout}" - fi + read -r -a arr <<<"$job" + namespace="${arr[0]}" + name="${arr[1]}" + timeout="${arr[2]}" + echo -n -e "\n${name}\t" + if testResourceExistence job "${namespace}" "${name}"; then + testJobStatus "${namespace}" "${name}" "${timeout}" + fi done # Format: @@ -193,35 +193,35 @@ cronjobs=( "opensearch-system opensearch-curator" ) if "${enable_harbor}" && "${enable_harbor_backup}"; then - cronjobs+=("harbor harbor-backup-cronjob") + cronjobs+=("harbor harbor-backup-cronjob") fi if "${enable_opensearch_snapshot}"; then - cronjobs+=( - "opensearch-system opensearch-backup" - "opensearch-system opensearch-slm" - ) + cronjobs+=( + "opensearch-system opensearch-backup" + "opensearch-system opensearch-slm" + ) fi if "${enable_fluentd_audit}"; then - cluster_name="$(yq4 '.global.clusterName' "${CONFIG_FILE}" 2>/dev/null)" - mapfile -t clusters_monitoring < <(yq4 '.global.clustersMonitoring[]' "${CONFIG_FILE}" 2>/dev/null) - + cluster_name="$(yq4 '.global.clusterName' "${CONFIG_FILE}" 2>/dev/null)" + mapfile -t clusters_monitoring < <(yq4 '.global.clustersMonitoring[]' "${CONFIG_FILE}" 2>/dev/null) + + cronjobs+=( + "fluentd-system audit-$cluster_name-compaction" + "fluentd-system audit-$cluster_name-retention" + ) + for cluster in "${clusters_monitoring[@]}"; do cronjobs+=( - "fluentd-system audit-$cluster_name-compaction" - "fluentd-system audit-$cluster_name-retention" + "fluentd-system audit-$cluster-compaction" + "fluentd-system audit-$cluster-retention" ) - for cluster in "${clusters_monitoring[@]}"; do - cronjobs+=( - "fluentd-system audit-$cluster-compaction" - "fluentd-system audit-$cluster-retention" - ) - done + done fi if "${enable_fluentd_logs}"; then - cronjobs+=( - "fluentd-system sc-logs-logs-compaction" - "fluentd-system sc-logs-logs-retention" - ) + cronjobs+=( + "fluentd-system sc-logs-logs-compaction" + "fluentd-system sc-logs-logs-retention" + ) fi echo @@ -230,11 +230,11 @@ echo "Testing cronjobs" echo 
"====================" for cronjob in "${cronjobs[@]}"; do - read -r -a arr <<< "$cronjob" - namespace="${arr[0]}" - name="${arr[1]}" - echo -n -e "\n${name}\t" - if testResourceExistence cronjob "${namespace}" "${name}" && [ "$LOGGING" == "--logging-enabled" ]; then - logCronJob "${namespace}" "${name}" - fi + read -r -a arr <<<"$cronjob" + namespace="${arr[0]}" + name="${arr[1]}" + echo -n -e "\n${name}\t" + if testResourceExistence cronjob "${namespace}" "${name}" && [ "$LOGGING" == "--logging-enabled" ]; then + logCronJob "${namespace}" "${name}" + fi done diff --git a/pipeline/test/services/service-cluster/testPrometheusTargets.sh b/pipeline/test/services/service-cluster/testPrometheusTargets.sh index 2a403e0439..94ca5a427f 100755 --- a/pipeline/test/services/service-cluster/testPrometheusTargets.sh +++ b/pipeline/test/services/service-cluster/testPrometheusTargets.sh @@ -29,44 +29,44 @@ echo "==================================" # "monitoring/kube-prometheus-stack-kube-etcd/0 1" # "monitoring/kube-prometheus-stack-kube-proxy/0 1" scTargets=( - "serviceMonitor/opensearch-system/prometheus-opensearch-exporter/0 1" - "serviceMonitor/monitoring/prometheus-blackbox-exporter-dex/0 1" - "serviceMonitor/monitoring/prometheus-blackbox-exporter-grafana/0 1" - "serviceMonitor/monitoring/prometheus-blackbox-exporter-opensearch-dashboards/0 1" - "serviceMonitor/monitoring/kube-prometheus-stack-alertmanager/0 2" - "serviceMonitor/monitoring/kube-prometheus-stack-apiserver/0 ${masterNodes}" - "serviceMonitor/monitoring/kube-prometheus-stack-coredns/0 2" - "serviceMonitor/monitoring/ops-grafana/0 1" - "serviceMonitor/monitoring/kube-prometheus-stack-kube-state-metrics/0 1" - "serviceMonitor/monitoring/kube-prometheus-stack-kubelet/0 ${totalNodes}" - "serviceMonitor/monitoring/kube-prometheus-stack-kubelet/1 ${totalNodes}" - "serviceMonitor/monitoring/kube-prometheus-stack-prometheus-node-exporter/0 ${totalNodes}" - "serviceMonitor/monitoring/kube-prometheus-stack-operator/0 
1" - "serviceMonitor/monitoring/kube-prometheus-stack-prometheus/0 ${totalPrometheus}" + "serviceMonitor/opensearch-system/prometheus-opensearch-exporter/0 1" + "serviceMonitor/monitoring/prometheus-blackbox-exporter-dex/0 1" + "serviceMonitor/monitoring/prometheus-blackbox-exporter-grafana/0 1" + "serviceMonitor/monitoring/prometheus-blackbox-exporter-opensearch-dashboards/0 1" + "serviceMonitor/monitoring/kube-prometheus-stack-alertmanager/0 2" + "serviceMonitor/monitoring/kube-prometheus-stack-apiserver/0 ${masterNodes}" + "serviceMonitor/monitoring/kube-prometheus-stack-coredns/0 2" + "serviceMonitor/monitoring/ops-grafana/0 1" + "serviceMonitor/monitoring/kube-prometheus-stack-kube-state-metrics/0 1" + "serviceMonitor/monitoring/kube-prometheus-stack-kubelet/0 ${totalNodes}" + "serviceMonitor/monitoring/kube-prometheus-stack-kubelet/1 ${totalNodes}" + "serviceMonitor/monitoring/kube-prometheus-stack-prometheus-node-exporter/0 ${totalNodes}" + "serviceMonitor/monitoring/kube-prometheus-stack-operator/0 1" + "serviceMonitor/monitoring/kube-prometheus-stack-prometheus/0 ${totalPrometheus}" ) if [ ${#custom_kubeapi_targets[@]} -gt 0 ]; then - for target_name in "${custom_kubeapi_targets[@]}"; do - scTargets+=("serviceMonitor/monitoring/prometheus-blackbox-exporter-${target_name}/0 1") - done + for target_name in "${custom_kubeapi_targets[@]}"; do + scTargets+=("serviceMonitor/monitoring/prometheus-blackbox-exporter-${target_name}/0 1") + done else - scTargets+=("serviceMonitor/monitoring/prometheus-blackbox-exporter-user-api-server/0 1") + scTargets+=("serviceMonitor/monitoring/prometheus-blackbox-exporter-user-api-server/0 1") fi if [[ "${enable_thanos}" == "true" ]] && [[ "${enable_thanos_service_monitor}" == "true" ]] && [[ "${enable_thanos_receiver}" == "true" ]]; then - scTargets+=( - "serviceMonitor/thanos/thanos-receiver-bucketweb/0 1" - "serviceMonitor/thanos/thanos-receiver-compactor/0 1" - "serviceMonitor/thanos/thanos-receiver-receive/0 3" - 
"serviceMonitor/thanos/thanos-receiver-storegateway/0 1" - ) + scTargets+=( + "serviceMonitor/thanos/thanos-receiver-bucketweb/0 1" + "serviceMonitor/thanos/thanos-receiver-compactor/0 1" + "serviceMonitor/thanos/thanos-receiver-receive/0 3" + "serviceMonitor/thanos/thanos-receiver-storegateway/0 1" + ) fi if [[ "${enable_thanos}" == "true" ]] && [[ "${enable_thanos_service_monitor}" == "true" ]] && [[ "${enable_thanos_ruler}" == "true" ]]; then - scTargets+=("serviceMonitor/thanos/thanos-receiver-ruler/0 4") + scTargets+=("serviceMonitor/thanos/thanos-receiver-ruler/0 4") fi if [[ "${enable_thanos}" == "true" ]] && [[ "${enable_thanos_service_monitor}" == "true" ]] && [[ "${enable_thanos_query}" == "true" ]]; then - scTargets+=( - "serviceMonitor/thanos/thanos-query-query/0 2" - "serviceMonitor/thanos/thanos-query-query-frontend/0 1" - ) + scTargets+=( + "serviceMonitor/thanos/thanos-query-query/0 2" + "serviceMonitor/thanos/thanos-query-query-frontend/0 1" + ) fi test_targets_retry "svc/kube-prometheus-stack-prometheus" "${scTargets[@]}" diff --git a/pipeline/test/services/test-sc.sh b/pipeline/test/services/test-sc.sh index 50a13ed20f..a6c67688ed 100755 --- a/pipeline/test/services/test-sc.sh +++ b/pipeline/test/services/test-sc.sh @@ -2,9 +2,9 @@ SCRIPTS_PATH="$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" -if [[ ! -f $1 ]];then - echo "ERROR: $1 is not a valid file" - exit 1 +if [[ ! 
-f $1 ]]; then + echo "ERROR: $1 is not a valid file" + exit 1 fi export CONFIG_FILE=$1 LOGGING="${3:-$2}" @@ -29,46 +29,44 @@ source "${SCRIPTS_PATH}"/service-cluster/testPrometheusTargets.sh echo -e "\nSuccesses: $SUCCESSES" echo "Failures: $FAILURES" -if [ $FAILURES -gt 0 ] && [ "$LOGGING" == "--logging-enabled" ] -then - echo "Something failed" - echo - echo "Logs from failed test resources" - echo "===============================" - echo - echo "Exists in logs/ServiceCluster//" - echo - echo "Events from failed test resources" - echo "===============================" - echo - echo "Exists in events/ServiceCluster//" - echo - echo "Json output of failed test resources" - echo "====================================" - echo - echo "${DEBUG_OUTPUT[@]}" | jq . - echo - echo "Unhealthy/missing prometheus targets" - echo "====================================" - echo - echo "${DEBUG_PROMETHEUS_TARGETS[@]}" - echo - exit 1 -elif [ $FAILURES -gt 0 ] -then - echo "Something failed" - echo - echo "Json output of failed test resources" - echo "====================================" - echo - echo "${DEBUG_OUTPUT[@]}" | jq . - echo - echo "Unhealthy/missing prometheus targets" - echo "====================================" - echo - echo "${DEBUG_PROMETHEUS_TARGETS[@]}" - echo - exit 1 +if [ $FAILURES -gt 0 ] && [ "$LOGGING" == "--logging-enabled" ]; then + echo "Something failed" + echo + echo "Logs from failed test resources" + echo "===============================" + echo + echo "Exists in logs/ServiceCluster//" + echo + echo "Events from failed test resources" + echo "===============================" + echo + echo "Exists in events/ServiceCluster//" + echo + echo "Json output of failed test resources" + echo "====================================" + echo + echo "${DEBUG_OUTPUT[@]}" | jq . 
+ echo + echo "Unhealthy/missing prometheus targets" + echo "====================================" + echo + echo "${DEBUG_PROMETHEUS_TARGETS[@]}" + echo + exit 1 +elif [ $FAILURES -gt 0 ]; then + echo "Something failed" + echo + echo "Json output of failed test resources" + echo "====================================" + echo + echo "${DEBUG_OUTPUT[@]}" | jq . + echo + echo "Unhealthy/missing prometheus targets" + echo "====================================" + echo + echo "${DEBUG_PROMETHEUS_TARGETS[@]}" + echo + exit 1 fi echo "All tests succeeded" diff --git a/pipeline/test/services/test-wc.sh b/pipeline/test/services/test-wc.sh index cbf777cbc4..2787fa8d97 100755 --- a/pipeline/test/services/test-wc.sh +++ b/pipeline/test/services/test-wc.sh @@ -1,9 +1,9 @@ #!/usr/bin/env bash SCRIPTS_PATH="$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" -if [[ ! -f $1 ]];then - echo "ERROR: $1 is not a valid file" - exit 1 +if [[ ! -f $1 ]]; then + echo "ERROR: $1 is not a valid file" + exit 1 fi export CONFIG_FILE=$1 @@ -28,46 +28,44 @@ source "${SCRIPTS_PATH}"/workload-cluster/testUserRbac.sh echo -e "\nSuccesses: $SUCCESSES" echo "Failures: $FAILURES" -if [ $FAILURES -gt 0 ] && [ "$LOGGING" == "--logging-enabled" ] -then - echo "Something failed" - echo - echo "Logs from failed test resources" - echo "===============================" - echo - echo "Exists in logs/WorkloadCluster//" - echo - echo "Events from failed test resources" - echo "===============================" - echo - echo "Exists in events/WorkloadCluster//" - echo - echo "Json output of failed test resources" - echo "====================================" - echo - echo "${DEBUG_OUTPUT[@]}" | jq . 
- echo - echo "Unhealthy/missing prometheus targets" - echo "====================================" - echo - echo "${DEBUG_PROMETHEUS_TARGETS[@]}" - echo - exit 1 -elif [ $FAILURES -gt 0 ] -then - echo "Something failed" - echo - echo "Json output of failed test resources" - echo "====================================" - echo - echo "${DEBUG_OUTPUT[@]}" | jq . - echo - echo "Unhealthy/missing prometheus targets" - echo "====================================" - echo - echo "${DEBUG_PROMETHEUS_TARGETS[@]}" - echo - exit 1 +if [ $FAILURES -gt 0 ] && [ "$LOGGING" == "--logging-enabled" ]; then + echo "Something failed" + echo + echo "Logs from failed test resources" + echo "===============================" + echo + echo "Exists in logs/WorkloadCluster//" + echo + echo "Events from failed test resources" + echo "===============================" + echo + echo "Exists in events/WorkloadCluster//" + echo + echo "Json output of failed test resources" + echo "====================================" + echo + echo "${DEBUG_OUTPUT[@]}" | jq . + echo + echo "Unhealthy/missing prometheus targets" + echo "====================================" + echo + echo "${DEBUG_PROMETHEUS_TARGETS[@]}" + echo + exit 1 +elif [ $FAILURES -gt 0 ]; then + echo "Something failed" + echo + echo "Json output of failed test resources" + echo "====================================" + echo + echo "${DEBUG_OUTPUT[@]}" | jq . 
+ echo + echo "Unhealthy/missing prometheus targets" + echo "====================================" + echo + echo "${DEBUG_PROMETHEUS_TARGETS[@]}" + echo + exit 1 fi echo "All tests succeeded" diff --git a/pipeline/test/services/workload-cluster/testCertManager.sh b/pipeline/test/services/workload-cluster/testCertManager.sh index d6de12d1a9..a4ac2d6860 100644 --- a/pipeline/test/services/workload-cluster/testCertManager.sh +++ b/pipeline/test/services/workload-cluster/testCertManager.sh @@ -6,187 +6,187 @@ INNER_SCRIPTS_PATH="$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" source "${INNER_SCRIPTS_PATH}/../funcs.sh" function wc_certmanager_check_help() { - printf "%s\n" "[Usage]: test wc cert-manager [ARGUMENT]" - printf "\t%-25s %s\n" "--cluster-issuers" "Check cluster issuers" - printf "\t%-25s %s\n" "--certificates" "Check cluster certificates" - printf "\t%-25s %s\n" "--challenges" "Check challenges" - printf "%s\n" "[NOTE] If no argument is specified, it will go over all of them." + printf "%s\n" "[Usage]: test wc cert-manager [ARGUMENT]" + printf "\t%-25s %s\n" "--cluster-issuers" "Check cluster issuers" + printf "\t%-25s %s\n" "--certificates" "Check cluster certificates" + printf "\t%-25s %s\n" "--challenges" "Check challenges" + printf "%s\n" "[NOTE] If no argument is specified, it will go over all of them." - exit 0 + exit 0 } function wc_cert_manager_checks() { - if [[ ${#} == 0 ]]; then - echo "Running all checks ..." - check_wc_certmanager_cluster_issuers - check_wc_certmanager_apps_certificates - check_wc_certmanager_challenges - return - fi - while [[ ${#} -gt 0 ]]; do - case ${1} in - --cluster-issuers) - check_wc_certmanager_cluster_issuers - ;; - --certificates) - check_wc_certmanager_apps_certificates - ;; - --challenges) - check_wc_certmanager_challenges - ;; - --help) - wc_certmanager_check_help - ;; - esac - shift - done + if [[ ${#} == 0 ]]; then + echo "Running all checks ..." 
+ check_wc_certmanager_cluster_issuers + check_wc_certmanager_apps_certificates + check_wc_certmanager_challenges + return + fi + while [[ ${#} -gt 0 ]]; do + case ${1} in + --cluster-issuers) + check_wc_certmanager_cluster_issuers + ;; + --certificates) + check_wc_certmanager_apps_certificates + ;; + --challenges) + check_wc_certmanager_challenges + ;; + --help) + wc_certmanager_check_help + ;; + esac + shift + done } function check_wc_certmanager_cluster_issuers() { - echo -ne "Checking cert manager cluster issuers ... " - no_error=true - debug_msg="" - - clusterIssuers=("letsencrypt-prod" "letsencrypt-staging") - - for clusterIssuer in "${clusterIssuers[@]}"; do - if kubectl get ClusterIssuer "$clusterIssuer" &>/dev/null; then - jsonData=$(kubectl get ClusterIssuer "$clusterIssuer" -ojson) - cluster_issuer_status=$(echo "$jsonData" | jq -r '.status.conditions[] | select(.type=="Ready") | .status') - if [[ "$cluster_issuer_status" == "True" ]]; then - IFS='-' read -ra data <<<"$clusterIssuer" - readarray custom_solvers < <(yq4 e -o=j -I=0 ".issuers.${data[0]}.${data[1]}.solvers[]" "${config['config_file_wc']}") - if ! [ ${#custom_solvers[@]} -eq 0 ]; then - for custom_solver in "${custom_solvers[@]}"; do - challenge_solver=$(echo "$custom_solver" | jq ". | del( .selector ) | keys[] ") - solver_exist=$(kubectl get ClusterIssuer "$clusterIssuer" -oyaml | yq4 e -o=j -I=0 ".spec.acme.solvers[].$challenge_solver") - if [[ $solver_exist == "null" ]]; then - no_error=false - debug_msg+="[ERROR] Missing custom solver : $challenge_solver for Cluster Issuer : $clusterIssuer\n" - fi - done - fi - - else - no_error=false - debug_msg+="[ERROR] ClusterIssuer $clusterIssuer is not ready\n" + echo -ne "Checking cert manager cluster issuers ... 
" + no_error=true + debug_msg="" + + clusterIssuers=("letsencrypt-prod" "letsencrypt-staging") + + for clusterIssuer in "${clusterIssuers[@]}"; do + if kubectl get ClusterIssuer "$clusterIssuer" &>/dev/null; then + jsonData=$(kubectl get ClusterIssuer "$clusterIssuer" -ojson) + cluster_issuer_status=$(echo "$jsonData" | jq -r '.status.conditions[] | select(.type=="Ready") | .status') + if [[ "$cluster_issuer_status" == "True" ]]; then + IFS='-' read -ra data <<<"$clusterIssuer" + readarray custom_solvers < <(yq4 e -o=j -I=0 ".issuers.${data[0]}.${data[1]}.solvers[]" "${config['config_file_wc']}") + if ! [ ${#custom_solvers[@]} -eq 0 ]; then + for custom_solver in "${custom_solvers[@]}"; do + challenge_solver=$(echo "$custom_solver" | jq ". | del( .selector ) | keys[] ") + solver_exist=$(kubectl get ClusterIssuer "$clusterIssuer" -oyaml | yq4 e -o=j -I=0 ".spec.acme.solvers[].$challenge_solver") + if [[ $solver_exist == "null" ]]; then + no_error=false + debug_msg+="[ERROR] Missing custom solver : $challenge_solver for Cluster Issuer : $clusterIssuer\n" fi - - else - no_error=false - debug_msg+="[ERROR] ClusterIssuer $clusterIssuer does not exist\n" + done fi - done - if $no_error; then - echo "success ✔" - echo -e "[DEBUG] All ClusterIssuer resources are present and ready, with correct solvers" + else + no_error=false + debug_msg+="[ERROR] ClusterIssuer $clusterIssuer is not ready\n" + fi + else - echo "failure ❌" - echo -e "$debug_msg" + no_error=false + debug_msg+="[ERROR] ClusterIssuer $clusterIssuer does not exist\n" fi + done + + if $no_error; then + echo "success ✔" + echo -e "[DEBUG] All ClusterIssuer resources are present and ready, with correct solvers" + else + echo "failure ❌" + echo -e "$debug_msg" + fi } function check_wc_certmanager_apps_certificates() { - echo -ne "Checking cert manager for Certificates ... 
" - no_error=true - debug_msg="" - - certificates=() - - enable_hnc=$(yq4 -e '.hnc.enabled' "${config['config_file_wc']}" 2>/dev/null) - - if "${enable_hnc}"; then - certificates+=( - "hnc-system hnc-controller-webhook-server-cert" - ) - fi - - for cert in "${certificates[@]}"; do - read -r -a arr <<<"$cert" - namespace="${arr[0]}" - name="${arr[1]}" - if kubectl get "Certificate" -n "$namespace" "$name" &>/dev/null; then - certificate_data=$(kubectl get "Certificate" -n "$namespace" "$name" -ojson) - cert_renewal_time=$(echo "$certificate_data" | jq -r ".status.renewalTime") - cert_expiry_time=$(echo "$certificate_data" | jq -r ".status.notAfter") - cert_status=$(echo "$certificate_data" | jq -r ".status.conditions[] | select(.type==\"Ready\") | .status") - cert_status_message=$(echo "$certificate_data" | jq -r ".status.conditions[] | select(.type==\"Ready\") | .message") - if [[ "$cert_status" != "True" ]]; then - no_error=false - debug_msg+="[ERROR] $cert_status_message \n" - else - now_date=$(date +%s) - expiry_date=$(date -d "$cert_expiry_time" +%s) - renew_date=$(date -d "$cert_renewal_time" +%s) - ((expiry_diff = (expiry_date - now_date) / 86400)) - ((renew_diff = (renew_date - now_date) / 86400)) - if [[ $expiry_diff -lt 1 ]]; then - no_error=false - debug_msg+="[ERROR] $name will expire in less than $expiry_diff day(s)\n" - else - debug_msg+="[DEBUG] Certificate: $name is Ready, will expire in $expiry_diff day(s), will be renewed in $renew_diff day(s)\n" - fi - fi + echo -ne "Checking cert manager for Certificates ... 
" + no_error=true + debug_msg="" + + certificates=() + + enable_hnc=$(yq4 -e '.hnc.enabled' "${config['config_file_wc']}" 2>/dev/null) + + if "${enable_hnc}"; then + certificates+=( + "hnc-system hnc-controller-webhook-server-cert" + ) + fi + + for cert in "${certificates[@]}"; do + read -r -a arr <<<"$cert" + namespace="${arr[0]}" + name="${arr[1]}" + if kubectl get "Certificate" -n "$namespace" "$name" &>/dev/null; then + certificate_data=$(kubectl get "Certificate" -n "$namespace" "$name" -ojson) + cert_renewal_time=$(echo "$certificate_data" | jq -r ".status.renewalTime") + cert_expiry_time=$(echo "$certificate_data" | jq -r ".status.notAfter") + cert_status=$(echo "$certificate_data" | jq -r ".status.conditions[] | select(.type==\"Ready\") | .status") + cert_status_message=$(echo "$certificate_data" | jq -r ".status.conditions[] | select(.type==\"Ready\") | .message") + if [[ "$cert_status" != "True" ]]; then + no_error=false + debug_msg+="[ERROR] $cert_status_message \n" + else + now_date=$(date +%s) + expiry_date=$(date -d "$cert_expiry_time" +%s) + renew_date=$(date -d "$cert_renewal_time" +%s) + ((expiry_diff = (expiry_date - now_date) / 86400)) + ((renew_diff = (renew_date - now_date) / 86400)) + if [[ $expiry_diff -lt 1 ]]; then + no_error=false + debug_msg+="[ERROR] $name will expire in less than $expiry_diff day(s)\n" else - no_error=false - debug_msg+="[ERROR]: Missing certificate : $name in namespace $namespace\n" + debug_msg+="[DEBUG] Certificate: $name is Ready, will expire in $expiry_diff day(s), will be renewed in $renew_diff day(s)\n" fi - done - - if $no_error; then - echo "success ✔" - echo -e "$debug_msg" + fi else - echo "failure ❌" - echo -e "$debug_msg" + no_error=false + debug_msg+="[ERROR]: Missing certificate : $name in namespace $namespace\n" fi + done + + if $no_error; then + echo "success ✔" + echo -e "$debug_msg" + else + echo "failure ❌" + echo -e "$debug_msg" + fi } function check_wc_certmanager_challenges() { - echo -ne "Checking 
cert manager Challenges ... " - no_error=true - debug_msg="" - - challenges_data=$(kubectl get challenges -A -ojson) - - readarray -t pending_challenges < <(jq -c '.items[] | select(.status.state=="pending")' <<<"$challenges_data") - - if ! [[ $(echo "$challenges_data" | jq '.items | length') -eq 0 ]]; then - if [[ ${#pending_challenges[@]} != 0 ]]; then - no_error=false - debug_msg+="[ERROR] There are some pending challenges\n" - for pending_challenge in "${pending_challenges[@]}"; do - challenge_name=$(echo "$pending_challenge" | jq -r ".metadata.name") - challenge_namespace=$(echo "$pending_challenge" | jq -r ".metadata.namespace") - pending_reason=$(echo "$pending_challenge" | jq -r ".status.reason") - debug_msg+="Challenge $challenge_name in the $challenge_namespace namespace is pending because : $pending_reason\n" - done - fi + echo -ne "Checking cert manager Challenges ... " + no_error=true + debug_msg="" + + challenges_data=$(kubectl get challenges -A -ojson) + + readarray -t pending_challenges < <(jq -c '.items[] | select(.status.state=="pending")' <<<"$challenges_data") + + if ! [[ $(echo "$challenges_data" | jq '.items | length') -eq 0 ]]; then + if [[ ${#pending_challenges[@]} != 0 ]]; then + no_error=false + debug_msg+="[ERROR] There are some pending challenges\n" + for pending_challenge in "${pending_challenges[@]}"; do + challenge_name=$(echo "$pending_challenge" | jq -r ".metadata.name") + challenge_namespace=$(echo "$pending_challenge" | jq -r ".metadata.namespace") + pending_reason=$(echo "$pending_challenge" | jq -r ".status.reason") + debug_msg+="Challenge $challenge_name in the $challenge_namespace namespace is pending because : $pending_reason\n" + done fi - - orders_data=$(kubectl get orders -A -ojson) - - if ! 
[[ $(echo "$orders_data" | jq '.items | length') -eq 0 ]]; then - readarray -t errored_orders < <(jq -c '.items[] | select(.status.state=="errored")' <<<"$orders_data") - - if [[ ${#errored_orders[@]} != 0 ]]; then - no_error=false - debug_msg+="[ERROR] There some errored orders\n" - for errored_order in "${errored_orders[@]}"; do - order_name=$(echo "$errored_order" | jq -r ".metadata.name") - order_namespace=$(echo "$errored_order" | jq -r ".metadata.namespace") - errored_reason=$(echo "$errored_order" | jq -r ".status.reason") - debug_msg+="Order $order_name in the $order_namespace namespace is errored because : $errored_reason\n" - done - fi - fi - - if $no_error; then - echo "success ✔" - echo -e "[DEBUG] There are no pending challenges, or errored orders" - else - echo "failure ❌" - echo -e "$debug_msg" + fi + + orders_data=$(kubectl get orders -A -ojson) + + if ! [[ $(echo "$orders_data" | jq '.items | length') -eq 0 ]]; then + readarray -t errored_orders < <(jq -c '.items[] | select(.status.state=="errored")' <<<"$orders_data") + + if [[ ${#errored_orders[@]} != 0 ]]; then + no_error=false + debug_msg+="[ERROR] There some errored orders\n" + for errored_order in "${errored_orders[@]}"; do + order_name=$(echo "$errored_order" | jq -r ".metadata.name") + order_namespace=$(echo "$errored_order" | jq -r ".metadata.namespace") + errored_reason=$(echo "$errored_order" | jq -r ".status.reason") + debug_msg+="Order $order_name in the $order_namespace namespace is errored because : $errored_reason\n" + done fi + fi + + if $no_error; then + echo "success ✔" + echo -e "[DEBUG] There are no pending challenges, or errored orders" + else + echo "failure ❌" + echo -e "$debug_msg" + fi } diff --git a/pipeline/test/services/workload-cluster/testEndpoints.sh b/pipeline/test/services/workload-cluster/testEndpoints.sh index 0497d7a14a..36f7915f0c 100755 --- a/pipeline/test/services/workload-cluster/testEndpoints.sh +++ b/pipeline/test/services/workload-cluster/testEndpoints.sh 
@@ -13,9 +13,8 @@ base_domain=$(yq4 -e '.global.baseDomain' "${CONFIG_FILE}") enable_user_alertmanager_ingress=$(yq4 -e '.user.alertmanager.ingress.enabled' "${CONFIG_FILE}") enable_user_alertmanager=$(yq4 -e '.user.alertmanager.enabled' "${CONFIG_FILE}") -if [[ "${enable_user_alertmanager_ingress}" == "true" && "${enable_user_alertmanager}" == "true" ]] -then - testEndpoint Alertmanager-user "https://alertmanager.${base_domain}/" +if [[ "${enable_user_alertmanager_ingress}" == "true" && "${enable_user_alertmanager}" == "true" ]]; then + testEndpoint Alertmanager-user "https://alertmanager.${base_domain}/" fi echo @@ -23,7 +22,6 @@ echo echo "Testing endpoints protection" echo "=============================" -if [[ "${enable_user_alertmanager_ingress}" == "true" && "${enable_user_alertmanager}" == "true" ]] -then - testEndpointProtected Alertmanager-user "https://alertmanager.${base_domain}/" 401 +if [[ "${enable_user_alertmanager_ingress}" == "true" && "${enable_user_alertmanager}" == "true" ]]; then + testEndpointProtected Alertmanager-user "https://alertmanager.${base_domain}/" 401 fi diff --git a/pipeline/test/services/workload-cluster/testHNC.sh b/pipeline/test/services/workload-cluster/testHNC.sh index 8a5dd3a653..760f3e88c2 100644 --- a/pipeline/test/services/workload-cluster/testHNC.sh +++ b/pipeline/test/services/workload-cluster/testHNC.sh @@ -6,143 +6,143 @@ INNER_SCRIPTS_PATH="$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" source "${INNER_SCRIPTS_PATH}/../funcs.sh" function wc_hnc_check_help() { - printf "%s\n" "[Usage]: test wc hnc [ARGUMENT]" - printf "\t%-25s %s\n" "--subns-anchors" "Check that users can create sub namespace anchors and remove them" - printf "\t%-25s %s\n" "--system-namespaces" "Check that no system namespace is labelled by HNC" - printf "%s\n" "[NOTE] If no argument is specified, it will go over all of them." 
+ printf "%s\n" "[Usage]: test wc hnc [ARGUMENT]" + printf "\t%-25s %s\n" "--subns-anchors" "Check that users can create sub namespace anchors and remove them" + printf "\t%-25s %s\n" "--system-namespaces" "Check that no system namespace is labelled by HNC" + printf "%s\n" "[NOTE] If no argument is specified, it will go over all of them." - exit 0 + exit 0 } function wc_hnc_checks() { - if [[ ${#} == 0 ]]; then - echo "Running all checks ..." - check_wc_hnc_creation_removal - check_wc_hnc_system_namespaces - return - fi - while [[ ${#} -gt 0 ]]; do - case ${1} in - --subns-anchors) - check_wc_hnc_creation_removal - ;; - --system-namespaces) - check_wc_hnc_system_namespaces - ;; - --help) - wc_hnc_check_help - ;; - esac - shift - done + if [[ ${#} == 0 ]]; then + echo "Running all checks ..." + check_wc_hnc_creation_removal + check_wc_hnc_system_namespaces + return + fi + while [[ ${#} -gt 0 ]]; do + case ${1} in + --subns-anchors) + check_wc_hnc_creation_removal + ;; + --system-namespaces) + check_wc_hnc_system_namespaces + ;; + --help) + wc_hnc_check_help + ;; + esac + shift + done } function check_wc_hnc_creation_removal() { - echo -ne "Checking that users can create/delete sub namespaces ... " - no_error=true - debug_msg="" + echo -ne "Checking that users can create/delete sub namespaces ... 
" + no_error=true + debug_msg="" - mapfile -t user_namespaces < <(yq4 -e '.user.namespaces - (.user.constraints | keys) | .[]' "${config['config_file_wc']}") - mapfile -t user_admin_users < <(yq4 '.user.adminUsers[]' "${config['config_file_wc']}") - mapfile -t user_admin_groups < <(yq4 '.user.adminGroups[]' "${config['config_file_wc']}") + mapfile -t user_namespaces < <(yq4 -e '.user.namespaces - (.user.constraints | keys) | .[]' "${config['config_file_wc']}") + mapfile -t user_admin_users < <(yq4 '.user.adminUsers[]' "${config['config_file_wc']}") + mapfile -t user_admin_groups < <(yq4 '.user.adminGroups[]' "${config['config_file_wc']}") - VERBS=( - create - delete - patch - update - ) + VERBS=( + create + delete + patch + update + ) - CK8S_NAMESPACES=( - cert-manager - default - falco - fluentd - kube-system - monitoring - ingress-nginx - velero - ) + CK8S_NAMESPACES=( + cert-manager + default + falco + fluentd + kube-system + monitoring + ingress-nginx + velero + ) - for user in "${user_admin_users[@]}"; do - for namespace in "${user_namespaces[@]}"; do - for verb in "${VERBS[@]}"; do - if ! kubectl auth can-i "${verb}" "subns" -n "${namespace}" --as "${user}" >/dev/null 2>&1; then - no_error=false - debug_msg+="[ERROR] User ${user} cannot ${verb} sub namespace under ${namespace} namespace\n" - fi - done - done + for user in "${user_admin_users[@]}"; do + for namespace in "${user_namespaces[@]}"; do + for verb in "${VERBS[@]}"; do + if ! kubectl auth can-i "${verb}" "subns" -n "${namespace}" --as "${user}" >/dev/null 2>&1; then + no_error=false + debug_msg+="[ERROR] User ${user} cannot ${verb} sub namespace under ${namespace} namespace\n" + fi + done done - for group in "${user_admin_groups[@]}"; do - for namespace in "${user_namespaces[@]}"; do - for verb in "${VERBS[@]}"; do - if ! 
kubectl auth can-i "$verb" "subns" -n "$namespace" --as "test-user" --as-group "${group}" >/dev/null 2>&1; then - no_error=false - debug_msg+="[ERROR] Group ${group} cannot ${verb} sub namespace under ${namespace} namespace\n" - fi - done - done + done + for group in "${user_admin_groups[@]}"; do + for namespace in "${user_namespaces[@]}"; do + for verb in "${VERBS[@]}"; do + if ! kubectl auth can-i "$verb" "subns" -n "$namespace" --as "test-user" --as-group "${group}" >/dev/null 2>&1; then + no_error=false + debug_msg+="[ERROR] Group ${group} cannot ${verb} sub namespace under ${namespace} namespace\n" + fi + done done + done - for user in "${user_admin_users[@]}"; do - for namespace in "${CK8S_NAMESPACES[@]}"; do - for verb in "${VERBS[@]}"; do - if kubectl auth can-i "${verb}" "subns" -n "${namespace}" --as "${user}" >/dev/null 2>&1; then - no_error=false - debug_msg+="[ERROR] User ${user} can ${verb} subnamespace anchors under ${namespace} namespace\n" - fi - done - done + for user in "${user_admin_users[@]}"; do + for namespace in "${CK8S_NAMESPACES[@]}"; do + for verb in "${VERBS[@]}"; do + if kubectl auth can-i "${verb}" "subns" -n "${namespace}" --as "${user}" >/dev/null 2>&1; then + no_error=false + debug_msg+="[ERROR] User ${user} can ${verb} subnamespace anchors under ${namespace} namespace\n" + fi + done done - for group in "${user_admin_groups[@]}"; do - for namespace in "${CK8S_NAMESPACES[@]}"; do - for verb in "${VERBS[@]}"; do - if kubectl auth can-i "$verb" "subns" -n "$namespace" --as "test-user" --as-group "${group}" >/dev/null 2>&1; then - no_error=false - debug_msg+="[ERROR] Group ${group} can ${verb} subnamespace anchors under ${namespace} namespace\n" - fi - done - done + done + for group in "${user_admin_groups[@]}"; do + for namespace in "${CK8S_NAMESPACES[@]}"; do + for verb in "${VERBS[@]}"; do + if kubectl auth can-i "$verb" "subns" -n "$namespace" --as "test-user" --as-group "${group}" >/dev/null 2>&1; then + no_error=false + 
debug_msg+="[ERROR] Group ${group} can ${verb} subnamespace anchors under ${namespace} namespace\n" + fi + done done + done - if $no_error; then - echo "success ✔" - echo -e "[DEBUG] Users are able to create/delete subnamespaces anchors" - else - echo "failure ❌" - echo -e "${debug_msg}" - fi + if $no_error; then + echo "success ✔" + echo -e "[DEBUG] Users are able to create/delete subnamespaces anchors" + else + echo "failure ❌" + echo -e "${debug_msg}" + fi } function check_wc_hnc_system_namespaces() { - echo -ne "Checking that system namespaces are not labelled by HNC ... " - no_error=true - debug_msg="" + echo -ne "Checking that system namespaces are not labelled by HNC ... " + no_error=true + debug_msg="" - CK8S_NAMESPACES=( - cert-manager - falco - fluentd - kube-system - monitoring - ingress-nginx - velero - ) + CK8S_NAMESPACES=( + cert-manager + falco + fluentd + kube-system + monitoring + ingress-nginx + velero + ) - for namespace in "${CK8S_NAMESPACES[@]}"; do - hnc_label_exists=$(kubectl get ns "${namespace}" -ojson | jq -r '.metadata.labels | .["hnc.x-k8s.io/included-namespace"]') - if [[ "${hnc_label_exists}" == "true" ]]; then - no_error=false - debug_msg+="[ERROR] The ${namespace} namespace is labelled by HNC\n" - fi - done - - if $no_error; then - echo "success ✔" - echo -e "[DEBUG] No system namespace is labelled by HNC" - else - echo "failure ❌" - echo -e "${debug_msg}" + for namespace in "${CK8S_NAMESPACES[@]}"; do + hnc_label_exists=$(kubectl get ns "${namespace}" -ojson | jq -r '.metadata.labels | .["hnc.x-k8s.io/included-namespace"]') + if [[ "${hnc_label_exists}" == "true" ]]; then + no_error=false + debug_msg+="[ERROR] The ${namespace} namespace is labelled by HNC\n" fi + done + + if $no_error; then + echo "success ✔" + echo -e "[DEBUG] No system namespace is labelled by HNC" + else + echo "failure ❌" + echo -e "${debug_msg}" + fi } diff --git a/pipeline/test/services/workload-cluster/testIngress.sh 
b/pipeline/test/services/workload-cluster/testIngress.sh index 34ccd4041f..86b3682bf1 100644 --- a/pipeline/test/services/workload-cluster/testIngress.sh +++ b/pipeline/test/services/workload-cluster/testIngress.sh @@ -6,69 +6,69 @@ INNER_SCRIPTS_PATH="$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" source "${INNER_SCRIPTS_PATH}/../funcs.sh" function wc_ingress_check_help() { - printf "%s\n" "[Usage]: test wc ingress [ARGUMENT]" - printf "\t%-25s %s\n" "--health" "Check Ingress Health" - printf "%s\n" "[NOTE] If no argument is specified, it will go over all of them." + printf "%s\n" "[Usage]: test wc ingress [ARGUMENT]" + printf "\t%-25s %s\n" "--health" "Check Ingress Health" + printf "%s\n" "[NOTE] If no argument is specified, it will go over all of them." - exit 0 + exit 0 } function wc_ingress_checks() { - if [[ ${#} == 0 ]]; then - echo "Running all checks ..." - check_wc_ingress_health - return - fi - while [[ ${#} -gt 0 ]]; do - case ${1} in - --health) - check_wc_ingress_health - ;; - --help) - wc_ingress_check_help - ;; - esac - shift - done + if [[ ${#} == 0 ]]; then + echo "Running all checks ..." + check_wc_ingress_health + return + fi + while [[ ${#} -gt 0 ]]; do + case ${1} in + --health) + check_wc_ingress_health + ;; + --help) + wc_ingress_check_help + ;; + esac + shift + done } function check_wc_ingress_health() { - echo -ne "Checking Ingress Nginx health ... " - no_error=true - debug_msg="" + echo -ne "Checking Ingress Nginx health ... 
" + no_error=true + debug_msg="" - desired_replicas=$(kubectl get daemonset -n ingress-nginx ingress-nginx-controller -ojson | jq ".status.desiredNumberScheduled | tonumber") - ready_replicas=$(kubectl get daemonset -n ingress-nginx ingress-nginx-controller -ojson | jq ".status.numberReady | tonumber") - has_proxy_protocol=$(kubectl get configmap -n ingress-nginx ingress-nginx-controller -oyaml | yq4 '.data.use-proxy-protocol') + desired_replicas=$(kubectl get daemonset -n ingress-nginx ingress-nginx-controller -ojson | jq ".status.desiredNumberScheduled | tonumber") + ready_replicas=$(kubectl get daemonset -n ingress-nginx ingress-nginx-controller -ojson | jq ".status.numberReady | tonumber") + has_proxy_protocol=$(kubectl get configmap -n ingress-nginx ingress-nginx-controller -oyaml | yq4 '.data.use-proxy-protocol') - diff=$((desired_replicas - ready_replicas)) - if "${has_proxy_protocol}"; then - debug_msg+="[DEBUG] unable to test ingress with proxy protocol\n" - echo "skipping -" - echo -ne "$debug_msg" - return - elif [[ $desired_replicas -eq $ready_replicas ]]; then - read -r -a pods <<<"$(kubectl get pods -n ingress-nginx -ojson | jq -r '.items[].metadata.name' | tr '\n' ' ')" - for pod in "${pods[@]}"; do - if [[ "$pod" =~ ingress-nginx-controller* ]]; then - # shellcheck disable=SC2086 - res=$(kubectl -n ingress-nginx exec -it "$pod" -- wget --spider -S --tries=4 --no-check-certificate https://localhost/healthz 2>&1 | grep "HTTP/" | awk '{print $2}') - if [[ "$res" != "200" ]]; then - no_error=false - debug_msg+="[ERROR] The following nginx pod $pod is not healthy\n" - fi - fi - done - else - no_error=false - debug_msg+="[ERROR] $diff out of $desired_replicas of ingress-nginx-controller pods are not ready\n" - fi + diff=$((desired_replicas - ready_replicas)) + if "${has_proxy_protocol}"; then + debug_msg+="[DEBUG] unable to test ingress with proxy protocol\n" + echo "skipping -" + echo -ne "$debug_msg" + return + elif [[ $desired_replicas -eq 
$ready_replicas ]]; then + read -r -a pods <<<"$(kubectl get pods -n ingress-nginx -ojson | jq -r '.items[].metadata.name' | tr '\n' ' ')" + for pod in "${pods[@]}"; do + if [[ "$pod" =~ ingress-nginx-controller* ]]; then + # shellcheck disable=SC2086 + res=$(kubectl -n ingress-nginx exec -it "$pod" -- wget --spider -S --tries=4 --no-check-certificate https://localhost/healthz 2>&1 | grep "HTTP/" | awk '{print $2}') + if [[ "$res" != "200" ]]; then + no_error=false + debug_msg+="[ERROR] The following nginx pod $pod is not healthy\n" + fi + fi + done + else + no_error=false + debug_msg+="[ERROR] $diff out of $desired_replicas of ingress-nginx-controller pods are not ready\n" + fi - if $no_error; then - echo "success ✔" - echo -e "[DEBUG] All nginx ingress pods are ready & healthy." - else - echo "failure ❌" - echo -e "$debug_msg" - fi + if $no_error; then + echo "success ✔" + echo -e "[DEBUG] All nginx ingress pods are ready & healthy." + else + echo "failure ❌" + echo -e "$debug_msg" + fi } diff --git a/pipeline/test/services/workload-cluster/testPodsReady.sh b/pipeline/test/services/workload-cluster/testPodsReady.sh index 66eb603fb4..0518ae16e6 100755 --- a/pipeline/test/services/workload-cluster/testPodsReady.sh +++ b/pipeline/test/services/workload-cluster/testPodsReady.sh @@ -20,38 +20,38 @@ echo "Testing deployments" echo "===================" deployments=( - "cert-manager cert-manager" - "cert-manager cert-manager-cainjector" - "cert-manager cert-manager-webhook" - "gatekeeper-system gatekeeper-controller-manager" - "ingress-nginx ingress-nginx-default-backend" - "kube-system coredns" - "kube-system metrics-server" - "monitoring kube-prometheus-stack-operator" - "monitoring kube-prometheus-stack-kube-state-metrics" + "cert-manager cert-manager" + "cert-manager cert-manager-cainjector" + "cert-manager cert-manager-webhook" + "gatekeeper-system gatekeeper-controller-manager" + "ingress-nginx ingress-nginx-default-backend" + "kube-system coredns" + "kube-system 
metrics-server" + "monitoring kube-prometheus-stack-operator" + "monitoring kube-prometheus-stack-kube-state-metrics" ) if "${enable_hnc}"; then - deployments+=("hnc-system hnc-controller-controller-manager") + deployments+=("hnc-system hnc-controller-controller-manager") - if "${enable_hnc_ha}"; then - deployments+=("hnc-system hnc-controller-webhook") - fi + if "${enable_hnc_ha}"; then + deployments+=("hnc-system hnc-controller-webhook") + fi fi if "${enable_falco}" && "${enable_falco_alerts}"; then - deployments+=("falco falco-falcosidekick") + deployments+=("falco falco-falcosidekick") fi if "${enable_velero}"; then - deployments+=("velero velero") + deployments+=("velero velero") fi resourceKind="Deployment" # Get json data in a smaller dataset simpleData="$(getStatus $resourceKind)" for deployment in "${deployments[@]}"; do - read -r -a arr <<< "$deployment" - namespace="${arr[0]}" - name="${arr[1]}" - testResourceExistenceFast "${resourceKind}" "${namespace}" "${name}" "${simpleData}" + read -r -a arr <<<"$deployment" + namespace="${arr[0]}" + name="${arr[1]}" + testResourceExistenceFast "${resourceKind}" "${namespace}" "${name}" "${simpleData}" done echo @@ -60,21 +60,21 @@ echo "Testing daemonsets" echo "==================" daemonsets=( - "kube-system node-local-dns" - "ingress-nginx ingress-nginx-controller" - "monitoring kube-prometheus-stack-prometheus-node-exporter" + "kube-system node-local-dns" + "ingress-nginx ingress-nginx-controller" + "monitoring kube-prometheus-stack-prometheus-node-exporter" ) if "${enable_falco}"; then - daemonsets+=("falco falco") + daemonsets+=("falco falco") fi if "${enable_fluentd}"; then - daemonsets+=( - "fluentd fluentd-fluentd-elasticsearch" - "fluentd-system fluentd-forwarder" - ) + daemonsets+=( + "fluentd fluentd-fluentd-elasticsearch" + "fluentd-system fluentd-forwarder" + ) fi if "${enable_velero}"; then - daemonsets+=("velero node-agent") + daemonsets+=("velero node-agent") fi if "${enable_kured}"; then 
daemonsets+=("kured kured") @@ -84,10 +84,10 @@ resourceKind="DaemonSet" # Get json data in a smaller dataset simpleData="$(getStatus $resourceKind)" for daemonset in "${daemonsets[@]}"; do - read -r -a arr <<< "$daemonset" - namespace="${arr[0]}" - name="${arr[1]}" - testResourceExistenceFast ${resourceKind} "${namespace}" "${name}" "${simpleData}" + read -r -a arr <<<"$daemonset" + namespace="${arr[0]}" + name="${arr[1]}" + testResourceExistenceFast ${resourceKind} "${namespace}" "${name}" "${simpleData}" done echo @@ -96,22 +96,22 @@ echo "Testing statefulsets" echo "====================" statefulsets=( - "monitoring prometheus-kube-prometheus-stack-prometheus" + "monitoring prometheus-kube-prometheus-stack-prometheus" ) if "${enable_user_alertmanager}"; then - statefulsets+=("alertmanager alertmanager-alertmanager") + statefulsets+=("alertmanager alertmanager-alertmanager") fi if "${enable_fluentd}" && "${enable_fluentd_audit}"; then - statefulsets+=("fluentd-system fluentd-aggregator") + statefulsets+=("fluentd-system fluentd-aggregator") fi resourceKind="StatefulSet" # Get json data in a smaller dataset simpleData="$(getStatus $resourceKind)" for statefulset in "${statefulsets[@]}"; do - read -r -a arr <<< "$statefulset" - namespace="${arr[0]}" - name="${arr[1]}" - testResourceExistenceFast ${resourceKind} "${namespace}" "${name}" "${simpleData}" + read -r -a arr <<<"$statefulset" + namespace="${arr[0]}" + name="${arr[1]}" + testResourceExistenceFast ${resourceKind} "${namespace}" "${name}" "${simpleData}" done diff --git a/pipeline/test/services/workload-cluster/testPrometheusTargets.sh b/pipeline/test/services/workload-cluster/testPrometheusTargets.sh index 4dda6f429c..f4289ab5c2 100755 --- a/pipeline/test/services/workload-cluster/testPrometheusTargets.sh +++ b/pipeline/test/services/workload-cluster/testPrometheusTargets.sh @@ -20,14 +20,14 @@ echo "===================================" # "monitoring/kube-prometheus-stack-kube-etcd/0 1" # 
"monitoring/kube-prometheus-stack-kube-proxy/0 1" wcTargets=( - "serviceMonitor/monitoring/kube-prometheus-stack-apiserver/0 ${masterNodes}" - "serviceMonitor/monitoring/kube-prometheus-stack-coredns/0 2" - "serviceMonitor/monitoring/kube-prometheus-stack-kube-state-metrics/0 1" - "serviceMonitor/monitoring/kube-prometheus-stack-kubelet/0 ${totalNodes}" - "serviceMonitor/monitoring/kube-prometheus-stack-kubelet/1 ${totalNodes}" - "serviceMonitor/monitoring/kube-prometheus-stack-prometheus-node-exporter/0 ${totalNodes}" - "serviceMonitor/monitoring/kube-prometheus-stack-operator/0 1" - "serviceMonitor/monitoring/kube-prometheus-stack-prometheus/0 1" + "serviceMonitor/monitoring/kube-prometheus-stack-apiserver/0 ${masterNodes}" + "serviceMonitor/monitoring/kube-prometheus-stack-coredns/0 2" + "serviceMonitor/monitoring/kube-prometheus-stack-kube-state-metrics/0 1" + "serviceMonitor/monitoring/kube-prometheus-stack-kubelet/0 ${totalNodes}" + "serviceMonitor/monitoring/kube-prometheus-stack-kubelet/1 ${totalNodes}" + "serviceMonitor/monitoring/kube-prometheus-stack-prometheus-node-exporter/0 ${totalNodes}" + "serviceMonitor/monitoring/kube-prometheus-stack-operator/0 1" + "serviceMonitor/monitoring/kube-prometheus-stack-prometheus/0 1" ) test_targets_retry "svc/kube-prometheus-stack-prometheus" "${wcTargets[@]}" diff --git a/pipeline/test/services/workload-cluster/testUserRbac.sh b/pipeline/test/services/workload-cluster/testUserRbac.sh index 333edd55da..46118b96a5 100755 --- a/pipeline/test/services/workload-cluster/testUserRbac.sh +++ b/pipeline/test/services/workload-cluster/testUserRbac.sh @@ -6,12 +6,14 @@ # 3. namespace # 4. 
user function testCanUserDoInNamespace { - echo -n "$4 $1 $2 in $3" - if kubectl auth can-i "$1" "$2" -n "$3" --as "$4" > /dev/null 2>&1; - then echo -e "\tauthorized ✔"; SUCCESSES=$((SUCCESSES+1)) - else - echo -e "\tnot authorized ❌"; FAILURES=$((FAILURES+1)) - fi + echo -n "$4 $1 $2 in $3" + if kubectl auth can-i "$1" "$2" -n "$3" --as "$4" >/dev/null 2>&1; then + echo -e "\tauthorized ✔" + SUCCESSES=$((SUCCESSES + 1)) + else + echo -e "\tnot authorized ❌" + FAILURES=$((FAILURES + 1)) + fi } # Args: @@ -19,12 +21,14 @@ function testCanUserDoInNamespace { # 2. resource # 3. user function testCanUserDo { - echo -n -e "$3 $1 $2" - if kubectl auth can-i "$1" "$2" --as "$3" > /dev/null 2>&1; - then echo -e "\tauthorized ✔"; SUCCESSES=$((SUCCESSES+1)) - else - echo -e "\tnot authorized ❌"; FAILURES=$((FAILURES+1)) - fi + echo -n -e "$3 $1 $2" + if kubectl auth can-i "$1" "$2" --as "$3" >/dev/null 2>&1; then + echo -e "\tauthorized ✔" + SUCCESSES=$((SUCCESSES + 1)) + else + echo -e "\tnot authorized ❌" + FAILURES=$((FAILURES + 1)) + fi } # Args: @@ -33,12 +37,14 @@ function testCanUserDo { # 3. namespace # 4. user function testCannotUserDoInNamespace { - echo -n "$4 $1 $2 in $3" - if kubectl auth can-i "$1" "$2" -n "$3" --as "$4" > /dev/null 2>&1; - then echo -e "\tauthorized ❌"; FAILURES=$((FAILURES+1)) - else - echo -e "\tnot authorized ✔"; SUCCESSES=$((SUCCESSES+1)) - fi + echo -n "$4 $1 $2 in $3" + if kubectl auth can-i "$1" "$2" -n "$3" --as "$4" >/dev/null 2>&1; then + echo -e "\tauthorized ❌" + FAILURES=$((FAILURES + 1)) + else + echo -e "\tnot authorized ✔" + SUCCESSES=$((SUCCESSES + 1)) + fi } # Args: @@ -46,12 +52,14 @@ function testCannotUserDoInNamespace { # 2. resource # 3. 
user function testCannotUserDo { - echo -n -e "$3 $1 $2" - if kubectl auth can-i "$1" "$2" --as "$3" > /dev/null 2>&1; - then echo -e "\tauthorized ❌"; FAILURES=$((FAILURES+1)) - else - echo -e "\tnot authorized ✔"; SUCCESSES=$((SUCCESSES+1)) - fi + echo -n -e "$3 $1 $2" + if kubectl auth can-i "$1" "$2" --as "$3" >/dev/null 2>&1; then + echo -e "\tauthorized ❌" + FAILURES=$((FAILURES + 1)) + else + echo -e "\tnot authorized ✔" + SUCCESSES=$((SUCCESSES + 1)) + fi } # Args: @@ -60,12 +68,14 @@ function testCannotUserDo { # 3. namespace # 4. group function testCanGroupDoInNamespace { - echo -n "$4 $1 $2 in $3" - if kubectl auth can-i "$1" "$2" -n "$3" --as "tester" --as-group "$4" > /dev/null 2>&1; - then echo -e "\tauthorized ✔"; SUCCESSES=$((SUCCESSES+1)) - else - echo -e "\tnot authorized ❌"; FAILURES=$((FAILURES+1)) - fi + echo -n "$4 $1 $2 in $3" + if kubectl auth can-i "$1" "$2" -n "$3" --as "tester" --as-group "$4" >/dev/null 2>&1; then + echo -e "\tauthorized ✔" + SUCCESSES=$((SUCCESSES + 1)) + else + echo -e "\tnot authorized ❌" + FAILURES=$((FAILURES + 1)) + fi } # Args: @@ -73,12 +83,14 @@ function testCanGroupDoInNamespace { # 2. resource # 3. group function testCanGroupDo { - echo -n -e "$3 $1 $2" - if kubectl auth can-i "$1" "$2" --as "tester" --as-group "$3" > /dev/null 2>&1; - then echo -e "\tauthorized ✔"; SUCCESSES=$((SUCCESSES+1)) - else - echo -e "\tnot authorized ❌"; FAILURES=$((FAILURES+1)) - fi + echo -n -e "$3 $1 $2" + if kubectl auth can-i "$1" "$2" --as "tester" --as-group "$3" >/dev/null 2>&1; then + echo -e "\tauthorized ✔" + SUCCESSES=$((SUCCESSES + 1)) + else + echo -e "\tnot authorized ❌" + FAILURES=$((FAILURES + 1)) + fi } # Args: @@ -87,12 +99,14 @@ function testCanGroupDo { # 3. namespace # 4. 
group function testCannotGroupDoInNamespace { - echo -n "$4 $1 $2 in $3" - if kubectl auth can-i "$1" "$2" -n "$3" --as "tester" --as-group "$4" > /dev/null 2>&1; - then echo -e "\tauthorized ❌"; FAILURES=$((FAILURES+1)) - else - echo -e "\tnot authorized ✔"; SUCCESSES=$((SUCCESSES+1)) - fi + echo -n "$4 $1 $2 in $3" + if kubectl auth can-i "$1" "$2" -n "$3" --as "tester" --as-group "$4" >/dev/null 2>&1; then + echo -e "\tauthorized ❌" + FAILURES=$((FAILURES + 1)) + else + echo -e "\tnot authorized ✔" + SUCCESSES=$((SUCCESSES + 1)) + fi } # Args: @@ -100,12 +114,14 @@ function testCannotGroupDoInNamespace { # 2. resource # 3. group function testCannotGroupDo { - echo -n -e "$3 $1 $2" - if kubectl auth can-i "$1" "$2" --as "tester" --as-group "$3" > /dev/null 2>&1; - then echo -e "\tauthorized ❌"; FAILURES=$((FAILURES+1)) - else - echo -e "\tnot authorized ✔"; SUCCESSES=$((SUCCESSES+1)) - fi + echo -n -e "$3 $1 $2" + if kubectl auth can-i "$1" "$2" --as "tester" --as-group "$3" >/dev/null 2>&1; then + echo -e "\tauthorized ❌" + FAILURES=$((FAILURES + 1)) + else + echo -e "\tnot authorized ✔" + SUCCESSES=$((SUCCESSES + 1)) + fi } echo @@ -117,129 +133,128 @@ user_namespaces=$(yq4 '.user.namespaces[]' "$CONFIG_FILE") mapfile -t user_admin_users < <(yq4 '.user.adminUsers[]' "$CONFIG_FILE") for user in "${user_admin_users[@]}"; do - testCanUserDo "get" "node" "$user" - testCanUserDo "get" "namespace" "$user" - testCannotUserDo "drain" "node" "$user" - testCannotUserDo "create" "namespace" "$user" + testCanUserDo "get" "node" "$user" + testCanUserDo "get" "namespace" "$user" + testCannotUserDo "drain" "node" "$user" + testCannotUserDo "create" "namespace" "$user" done VERBS=( - create - delete + create + delete ) RESOURCES=( - deployments + deployments ) for user in "${user_admin_users[@]}"; do - for namespace in ${user_namespaces}; do - for resource in "${RESOURCES[@]}"; do - for verb in "${VERBS[@]}"; do - testCanUserDoInNamespace "$verb" "$resource" "$namespace" 
"$user" - done - done + for namespace in ${user_namespaces}; do + for resource in "${RESOURCES[@]}"; do + for verb in "${VERBS[@]}"; do + testCanUserDoInNamespace "$verb" "$resource" "$namespace" "$user" + done done + done done VERBS=( - create - delete - patch - update + create + delete + patch + update ) RESOURCES=( - deployments - daemonset - statefulset - secrets + deployments + daemonset + statefulset + secrets ) CK8S_NAMESPACES=( - cert-manager - default - falco - fluentd - fluentd-system - kube-system - monitoring - ingress-nginx - velero + cert-manager + default + falco + fluentd + fluentd-system + kube-system + monitoring + ingress-nginx + velero ) for user in "${user_admin_users[@]}"; do - for namespace in "${CK8S_NAMESPACES[@]}"; do - for resource in "${RESOURCES[@]}"; do - for verb in "${VERBS[@]}"; do - testCannotUserDoInNamespace "$verb" "$resource" "$namespace" "$user" - done - done + for namespace in "${CK8S_NAMESPACES[@]}"; do + for resource in "${RESOURCES[@]}"; do + for verb in "${VERBS[@]}"; do + testCannotUserDoInNamespace "$verb" "$resource" "$namespace" "$user" + done done + done done FLUENTD_VERBS=( - patch + patch ) FLUENTD_RESOURCES=( - configmaps/fluentd-extra-config - configmaps/fluentd-extra-plugins + configmaps/fluentd-extra-config + configmaps/fluentd-extra-plugins ) for user in "${user_admin_users[@]}"; do - for resource in "${FLUENTD_RESOURCES[@]}"; do - for verb in "${FLUENTD_VERBS[@]}"; do - testCanUserDoInNamespace "$verb" "$resource" "fluentd" "$user" - done + for resource in "${FLUENTD_RESOURCES[@]}"; do + for verb in "${FLUENTD_VERBS[@]}"; do + testCanUserDoInNamespace "$verb" "$resource" "fluentd" "$user" done + done done -if [[ $ENABLE_USER_ALERTMANAGER == "true" ]] -then - ALERTMANAGER_SECRET_VERBS=( - update - ) - ALERTMANAGER_SECRET_RESOURCES=( - secret/alertmanager-alertmanager - secret/user-alertmanager-auth - ) - - for user in "${user_admin_users[@]}"; do - for resource in "${ALERTMANAGER_SECRET_RESOURCES[@]}"; do - 
for verb in "${ALERTMANAGER_SECRET_VERBS[@]}"; do - testCanUserDoInNamespace "$verb" "$resource" "monitoring" "$user" - done - done +if [[ $ENABLE_USER_ALERTMANAGER == "true" ]]; then + ALERTMANAGER_SECRET_VERBS=( + update + ) + ALERTMANAGER_SECRET_RESOURCES=( + secret/alertmanager-alertmanager + secret/user-alertmanager-auth + ) + + for user in "${user_admin_users[@]}"; do + for resource in "${ALERTMANAGER_SECRET_RESOURCES[@]}"; do + for verb in "${ALERTMANAGER_SECRET_VERBS[@]}"; do + testCanUserDoInNamespace "$verb" "$resource" "monitoring" "$user" + done done + done - ALERTMANAGER_SECRET_VERBS=( - create - delete - ) - ALERTMANAGER_SECRET_RESOURCES=( - secret/alertmanager-alertmanager - secret/user-alertmanager-auth - ) - - for user in "${user_admin_users[@]}"; do - for resource in "${ALERTMANAGER_SECRET_RESOURCES[@]}"; do - for verb in "${ALERTMANAGER_SECRET_VERBS[@]}"; do - testCannotUserDoInNamespace "$verb" "$resource" "monitoring" "$user" - done - done + ALERTMANAGER_SECRET_VERBS=( + create + delete + ) + ALERTMANAGER_SECRET_RESOURCES=( + secret/alertmanager-alertmanager + secret/user-alertmanager-auth + ) + + for user in "${user_admin_users[@]}"; do + for resource in "${ALERTMANAGER_SECRET_RESOURCES[@]}"; do + for verb in "${ALERTMANAGER_SECRET_VERBS[@]}"; do + testCannotUserDoInNamespace "$verb" "$resource" "monitoring" "$user" + done done + done - ALERTMANAGER_ROLEBINDING_VERBS=( - create - ) - ALERTMANAGER_ROLEBINDING_RESOURCES=( - rolebinding/alertmanager-configurer - ) - - for user in "${user_admin_users[@]}"; do - for resource in "${ALERTMANAGER_ROLEBINDING_RESOURCES[@]}"; do - for verb in "${ALERTMANAGER_ROLEBINDING_VERBS[@]}"; do - testCanUserDoInNamespace "$verb" "$resource" "monitoring" "$user" - done - done + ALERTMANAGER_ROLEBINDING_VERBS=( + create + ) + ALERTMANAGER_ROLEBINDING_RESOURCES=( + rolebinding/alertmanager-configurer + ) + + for user in "${user_admin_users[@]}"; do + for resource in "${ALERTMANAGER_ROLEBINDING_RESOURCES[@]}"; do + 
for verb in "${ALERTMANAGER_ROLEBINDING_VERBS[@]}"; do + testCanUserDoInNamespace "$verb" "$resource" "monitoring" "$user" + done done + done fi echo @@ -247,131 +262,129 @@ echo echo "Testing group RBAC" echo "=====================" - user_namespaces=$(yq4 '.user.namespaces[]' "$CONFIG_FILE") mapfile -t user_admin_groups < <(yq4 '.user.adminGroups[]' "$CONFIG_FILE") for group in "${user_admin_groups[@]}"; do - testCanGroupDo "get" "node" "$group" - testCanGroupDo "get" "namespace" "$group" - testCannotGroupDo "drain" "node" "$group" - testCannotGroupDo "create" "namespace" "$group" + testCanGroupDo "get" "node" "$group" + testCanGroupDo "get" "namespace" "$group" + testCannotGroupDo "drain" "node" "$group" + testCannotGroupDo "create" "namespace" "$group" done VERBS=( - create - delete + create + delete ) RESOURCES=( - deployments + deployments ) for group in "${user_admin_groups[@]}"; do - for namespace in ${user_namespaces}; do - for resource in "${RESOURCES[@]}"; do - for verb in "${VERBS[@]}"; do - testCanGroupDoInNamespace "$verb" "$resource" "$namespace" "$group" - done - done + for namespace in ${user_namespaces}; do + for resource in "${RESOURCES[@]}"; do + for verb in "${VERBS[@]}"; do + testCanGroupDoInNamespace "$verb" "$resource" "$namespace" "$group" + done done + done done VERBS=( - create - delete - patch - update + create + delete + patch + update ) RESOURCES=( - deployments - daemonset - statefulset - secrets + deployments + daemonset + statefulset + secrets ) CK8S_NAMESPACES=( - cert-manager - default - falco - fluentd - kube-system - monitoring - ingress-nginx - velero + cert-manager + default + falco + fluentd + kube-system + monitoring + ingress-nginx + velero ) for group in "${user_admin_groups[@]}"; do - for namespace in "${CK8S_NAMESPACES[@]}"; do - for resource in "${RESOURCES[@]}"; do - for verb in "${VERBS[@]}"; do - testCannotGroupDoInNamespace "$verb" "$resource" "$namespace" "$group" - done - done + for namespace in 
"${CK8S_NAMESPACES[@]}"; do + for resource in "${RESOURCES[@]}"; do + for verb in "${VERBS[@]}"; do + testCannotGroupDoInNamespace "$verb" "$resource" "$namespace" "$group" + done done + done done FLUENTD_VERBS=( - patch + patch ) FLUENTD_RESOURCES=( - configmaps/fluentd-extra-config - configmaps/fluentd-extra-plugins + configmaps/fluentd-extra-config + configmaps/fluentd-extra-plugins ) for group in "${user_admin_groups[@]}"; do - for resource in "${FLUENTD_RESOURCES[@]}"; do - for verb in "${FLUENTD_VERBS[@]}"; do - testCanGroupDoInNamespace "$verb" "$resource" "fluentd" "$group" - done + for resource in "${FLUENTD_RESOURCES[@]}"; do + for verb in "${FLUENTD_VERBS[@]}"; do + testCanGroupDoInNamespace "$verb" "$resource" "fluentd" "$group" done + done done -if [[ $ENABLE_USER_ALERTMANAGER == "true" ]] -then - ALERTMANAGER_SECRET_VERBS=( - update - ) - ALERTMANAGER_SECRET_RESOURCES=( - secret/alertmanager-alertmanager - secret/user-alertmanager-auth - ) - - for group in "${user_admin_groups[@]}"; do - for resource in "${ALERTMANAGER_SECRET_RESOURCES[@]}"; do - for verb in "${ALERTMANAGER_SECRET_VERBS[@]}"; do - testCanGroupDoInNamespace "$verb" "$resource" "monitoring" "$group" - done - done +if [[ $ENABLE_USER_ALERTMANAGER == "true" ]]; then + ALERTMANAGER_SECRET_VERBS=( + update + ) + ALERTMANAGER_SECRET_RESOURCES=( + secret/alertmanager-alertmanager + secret/user-alertmanager-auth + ) + + for group in "${user_admin_groups[@]}"; do + for resource in "${ALERTMANAGER_SECRET_RESOURCES[@]}"; do + for verb in "${ALERTMANAGER_SECRET_VERBS[@]}"; do + testCanGroupDoInNamespace "$verb" "$resource" "monitoring" "$group" + done done + done - ALERTMANAGER_SECRET_VERBS=( - create - delete - ) - ALERTMANAGER_SECRET_RESOURCES=( - secret/alertmanager-alertmanager - secret/user-alertmanager-auth - ) - - for group in "${user_admin_groups[@]}"; do - for resource in "${ALERTMANAGER_SECRET_RESOURCES[@]}"; do - for verb in "${ALERTMANAGER_SECRET_VERBS[@]}"; do - 
testCannotGroupDoInNamespace "$verb" "$resource" "monitoring" "$group" - done - done + ALERTMANAGER_SECRET_VERBS=( + create + delete + ) + ALERTMANAGER_SECRET_RESOURCES=( + secret/alertmanager-alertmanager + secret/user-alertmanager-auth + ) + + for group in "${user_admin_groups[@]}"; do + for resource in "${ALERTMANAGER_SECRET_RESOURCES[@]}"; do + for verb in "${ALERTMANAGER_SECRET_VERBS[@]}"; do + testCannotGroupDoInNamespace "$verb" "$resource" "monitoring" "$group" + done done + done - ALERTMANAGER_ROLEBINDING_VERBS=( - create - ) - ALERTMANAGER_ROLEBINDING_RESOURCES=( - rolebinding/alertmanager-configurer - ) - - for group in "${user_admin_groups[@]}"; do - for resource in "${ALERTMANAGER_ROLEBINDING_RESOURCES[@]}"; do - for verb in "${ALERTMANAGER_ROLEBINDING_VERBS[@]}"; do - testCanGroupDoInNamespace "$verb" "$resource" "monitoring" "$group" - done - done + ALERTMANAGER_ROLEBINDING_VERBS=( + create + ) + ALERTMANAGER_ROLEBINDING_RESOURCES=( + rolebinding/alertmanager-configurer + ) + + for group in "${user_admin_groups[@]}"; do + for resource in "${ALERTMANAGER_ROLEBINDING_RESOURCES[@]}"; do + for verb in "${ALERTMANAGER_ROLEBINDING_VERBS[@]}"; do + testCanGroupDoInNamespace "$verb" "$resource" "monitoring" "$group" + done done + done fi diff --git a/release/generate-release-notes.sh b/release/generate-release-notes.sh index ecd3ab0c84..c4c887096a 100755 --- a/release/generate-release-notes.sh +++ b/release/generate-release-notes.sh @@ -3,14 +3,14 @@ set -euo pipefail if ! 
command -v releaser >/dev/null; then - echo "releaser is not installed, install it by running: go install github.com/elastisys/releaser/cmd/releaser@latest" >&2 - echo "For more information see https://github.com/elastisys/releaser/#installation" >&2 - exit 1 + echo "releaser is not installed, install it by running: go install github.com/elastisys/releaser/cmd/releaser@latest" >&2 + echo "For more information see https://github.com/elastisys/releaser/#installation" >&2 + exit 1 fi function usage() { - echo "Usage: ${0} VERSION" >&2 - exit 1 + echo "Usage: ${0} VERSION" >&2 + exit 1 } [ ${#} -eq 1 ] || usage diff --git a/release/stage-release.sh b/release/stage-release.sh index faf2a76c58..23e500ddb8 100755 --- a/release/stage-release.sh +++ b/release/stage-release.sh @@ -3,14 +3,14 @@ set -euo pipefail if ! command -v releaser >/dev/null; then - echo "releaser is not installed, install it by running: go install github.com/elastisys/releaser/cmd/releaser@latest" >&2 - echo "For more information see https://github.com/elastisys/releaser/#installation" >&2 - exit 1 + echo "releaser is not installed, install it by running: go install github.com/elastisys/releaser/cmd/releaser@latest" >&2 + echo "For more information see https://github.com/elastisys/releaser/#installation" >&2 + exit 1 fi function usage() { - echo "Usage: ${0} VERSION" >&2 - exit 1 + echo "Usage: ${0} VERSION" >&2 + exit 1 } [ ${#} -eq 1 ] || usage @@ -32,7 +32,7 @@ git switch -c "staging-${full_version}" # for sha in ${CK8S_GIT_CHERRY_PICK:-}; do - git cherry-pick "${sha}" + git cherry-pick "${sha}" done # @@ -49,9 +49,9 @@ mkdir -p "${changelog_dir}" # notes. Also add an extra hashtag to please the markdownlint rule: # MD025 Multiple top level headers in the same document # TODO: Find a nicer way to do this. 
-[ "${patch}" != "0" ] && printf "\n#" >> "${changelog_path}" +[ "${patch}" != "0" ] && printf "\n#" >>"${changelog_path}" -releaser changelog compliantkubernetes-apps "${full_version}" >> "${changelog_path}" +releaser changelog compliantkubernetes-apps "${full_version}" >>"${changelog_path}" git add "${changelog_path}" git commit -m "Add changelog for release v${full_version}" diff --git a/restore/harbor/restore-harbor.sh b/restore/harbor/restore-harbor.sh index 7817c8f524..f345e60ad0 100755 --- a/restore/harbor/restore-harbor.sh +++ b/restore/harbor/restore-harbor.sh @@ -6,65 +6,65 @@ HOSTNAME=harbor-database backup_dir=backup/dbdump s3_download() { - : "${S3_BUCKET:?Missing S3_BUCKET}" - : "${S3_REGION_ENDPOINT:?Missing S3_REGION_ENDPOINT}" - if [[ -n "$SPECIFIC_BACKUP" ]]; then - backup_key=$SPECIFIC_BACKUP - else - backup_key=$(aws s3 ls "${S3_BUCKET}/backups" \ - --recursive \ - --endpoint-url="${S3_REGION_ENDPOINT}" \ - | sort | tail -n 1 | awk '{print $4}') - fi - echo "Downloading backup from s3 bucket ${backup_key}" >&2 - aws s3 cp "s3://${S3_BUCKET}/${backup_key}" harbor.tgz --endpoint-url="${S3_REGION_ENDPOINT}" + : "${S3_BUCKET:?Missing S3_BUCKET}" + : "${S3_REGION_ENDPOINT:?Missing S3_REGION_ENDPOINT}" + if [[ -n "$SPECIFIC_BACKUP" ]]; then + backup_key=$SPECIFIC_BACKUP + else + backup_key=$(aws s3 ls "${S3_BUCKET}/backups" \ + --recursive \ + --endpoint-url="${S3_REGION_ENDPOINT}" | + sort | tail -n 1 | awk '{print $4}') + fi + echo "Downloading backup from s3 bucket ${backup_key}" >&2 + aws s3 cp "s3://${S3_BUCKET}/${backup_key}" harbor.tgz --endpoint-url="${S3_REGION_ENDPOINT}" } -extract_backup(){ - echo "Extracting backups">&2 - tar xvf harbor.tgz - for backup_file in "registry" "postgres"; do - if [[ ! 
-f "${backup_dir}/${backup_file}.back" && -f "${backup_dir}/${backup_file}.back.gz" ]]; then - gzip -d < "${backup_dir}/${backup_file}.back.gz" > "${backup_dir}/${backup_file}.back" - rm "${backup_dir}/${backup_file}.back.gz" - fi - done +extract_backup() { + echo "Extracting backups" >&2 + tar xvf harbor.tgz + for backup_file in "registry" "postgres"; do + if [[ ! -f "${backup_dir}/${backup_file}.back" && -f "${backup_dir}/${backup_file}.back.gz" ]]; then + gzip -d <"${backup_dir}/${backup_file}.back.gz" >"${backup_dir}/${backup_file}.back" + rm "${backup_dir}/${backup_file}.back.gz" + fi + done } wait_for_db_ready() { - echo "Waiting for DB to be ready" >&2 - TIMEOUT=12 - while [ $TIMEOUT -gt 0 ]; do - if pg_isready -h $HOSTNAME | grep "accepting connections"; then - break - fi - TIMEOUT=$((TIMEOUT - 1)) - sleep 5 - done - if [ $TIMEOUT -eq 0 ]; then - echo "Harbor DB cannot reach within one minute." - exit 1 + echo "Waiting for DB to be ready" >&2 + TIMEOUT=12 + while [ $TIMEOUT -gt 0 ]; do + if pg_isready -h $HOSTNAME | grep "accepting connections"; then + break fi + TIMEOUT=$((TIMEOUT - 1)) + sleep 5 + done + if [ $TIMEOUT -eq 0 ]; then + echo "Harbor DB cannot be reached within one minute." 
+ exit 1 + fi } clean_database_data() { - echo "Dropping existing databases">&2 + echo "Dropping existing databases" >&2 psql -U postgres -d template1 -h $HOSTNAME -c "drop database registry;" psql -U postgres -d template1 -h $HOSTNAME -c "drop database postgres;" - echo "Creating clean database">&2 + echo "Creating clean database" >&2 psql -U postgres -d template1 -h $HOSTNAME -c "create database registry;" psql -U postgres -d template1 -h $HOSTNAME -c "create database postgres;" } restore_database() { - echo "Restoring database">&2 - psql -U postgres -h $HOSTNAME registry < ${backup_dir}/registry.back - psql -U postgres -h $HOSTNAME postgres < ${backup_dir}/postgres.back + echo "Restoring database" >&2 + psql -U postgres -h $HOSTNAME registry <${backup_dir}/registry.back + psql -U postgres -h $HOSTNAME postgres <${backup_dir}/postgres.back } -cleanup_local_files(){ - echo "Cleaning up local files">&2 +cleanup_local_files() { + echo "Cleaning up local files" >&2 rm harbor.tgz rm ${backup_dir}/registry.back rm ${backup_dir}/postgres.back @@ -84,15 +84,15 @@ azure_download() { : "${AZURE_CONTAINER_NAME:?Missing AZURE_CONTAINER_NAME}" if [[ -n "$SPECIFIC_BACKUP" ]]; then - backup_key=$SPECIFIC_BACKUP + backup_key=$SPECIFIC_BACKUP else - backup_key="$(az storage blob list \ - --account-name "${AZURE_ACCOUNT_NAME}" \ - --account-key "${AZURE_ACCOUNT_KEY}" \ - --container-name "${AZURE_CONTAINER_NAME}" \ - --prefix "backups" \ - --query 'sort_by([].{name:name, lastModified:properties.lastModified}, &lastModified)[-1].name' \ - -otsv)" + backup_key="$(az storage blob list \ + --account-name "${AZURE_ACCOUNT_NAME}" \ + --account-key "${AZURE_ACCOUNT_KEY}" \ + --container-name "${AZURE_CONTAINER_NAME}" \ + --prefix "backups" \ + --query 'sort_by([].{name:name, lastModified:properties.lastModified}, &lastModified)[-1].name' \ + -otsv)" fi echo "Downloading from Azure Blob Storage: ${backup_key}" >&2 diff --git a/scripts/S3/entry.sh b/scripts/S3/entry.sh index 
1b14060500..1f4467807e 100755 --- a/scripts/S3/entry.sh +++ b/scripts/S3/entry.sh @@ -11,11 +11,11 @@ CK8S_AUTO_APPROVE=${CK8S_AUTO_APPROVE:-"false"} here="$(dirname "$(readlink -f "$0")")" log_info() { - echo -e "[\e[34mck8s\e[0m] ${*}" 1>&2 + echo -e "[\e[34mck8s\e[0m] ${*}" 1>&2 } log_error() { - echo -e "[\e[31mck8s\e[0m] ${*}" 1>&2 + echo -e "[\e[31mck8s\e[0m] ${*}" 1>&2 } common_default=$(yq4 -o j '.objectStorage // {}' "${CK8S_CONFIG_PATH}/defaults/common-config.yaml") @@ -28,7 +28,7 @@ sc_config=$(echo "${sc_default}" | yq4 eval-all --prettyPrint '. as $item ireduc # shellcheck disable=SC2016 wc_default=$(echo "${common_default}" | yq4 eval-all --prettyPrint '. as $item ireduce ({}; . * $item )' - <(yq4 -o j '.objectStorage // {}' "${CK8S_CONFIG_PATH}/defaults/wc-config.yaml")) # shellcheck disable=SC2016 -wc_config=$(echo "${wc_default}" | yq4 eval-all --prettyPrint '. as $item ireduce ({}; . * $item )' - <(yq4 -o j '.objectStorage // {}' "${CK8S_CONFIG_PATH}/common-config.yaml") <(yq4 -o j '.objectStorage // {}' "${CK8S_CONFIG_PATH}/wc-config.yaml") | yq4 '{"objectStorage":.}' -) +wc_config=$(echo "${wc_default}" | yq4 eval-all --prettyPrint '. as $item ireduce ({}; . * $item )' - <(yq4 -o j '.objectStorage // {}' "${CK8S_CONFIG_PATH}/common-config.yaml") <(yq4 -o j '.objectStorage // {}' "${CK8S_CONFIG_PATH}/wc-config.yaml") | yq4 '{"objectStorage":.}' -) objectstorage_type_sc=$(echo "${sc_config}" | yq4 '.objectStorage.type' -) objectstorage_type_wc=$(echo "${wc_config}" | yq4 '.objectStorage.type' -) @@ -37,40 +37,43 @@ objectstorage_type_wc=$(echo "${wc_config}" | yq4 '.objectStorage.type' -) [ "$objectstorage_type_wc" != "s3" ] && log_info "S3 is not enabled in workload cluster" if [ "$objectstorage_type_sc" != "s3" ] && [ "$objectstorage_type_wc" != "s3" ]; then - log_error "S3 is not enabled in either cluster, aborting!" - exit 1 + log_error "S3 is not enabled in either cluster, aborting!" 
+ exit 1 fi [ "$objectstorage_type_sc" = "s3" ] && buckets_sc=$(echo "${sc_config}" | yq4 '.objectStorage.buckets.*' -) [ "$objectstorage_type_wc" = "s3" ] && buckets_wc=$(echo "${wc_config}" | yq4 '.objectStorage.buckets.*' -) -buckets=$( { echo "$buckets_sc"; echo "$buckets_wc"; } | sort | uniq | tr '\n' ' ' | sed s'/.$//') +buckets=$({ + echo "$buckets_sc" + echo "$buckets_wc" +} | sort | uniq | tr '\n' ' ' | sed s'/.$//') log_info "Operating on buckets: ${buckets// /', '}" function usage() { - log_error "Usage: $0 [--s3cfg config-path] create|delete" - exit 1 + log_error "Usage: $0 [--s3cfg config-path] create|delete" + exit 1 } if [ "$1" = "--s3cfg" ]; then - [ "$#" -ne 3 ] && log_error "Invalid number of arguments" && usage - action="$3" - cmd="${here}/manager.sh $1 $2 --$3 $buckets" - log_info "Using s3cmd config file: $2" + [ "$#" -ne 3 ] && log_error "Invalid number of arguments" && usage + action="$3" + cmd="${here}/manager.sh $1 $2 --$3 $buckets" + log_info "Using s3cmd config file: $2" else - [ "$#" -ne 1 ] && log_error "Invalid number of arguments" && usage - action="$1" - cmd="${here}/manager.sh --$1 $buckets" - log_info "Using s3cmd config file: ~/.s3cfg" + [ "$#" -ne 1 ] && log_error "Invalid number of arguments" && usage + action="$1" + cmd="${here}/manager.sh --$1 $buckets" + log_info "Using s3cmd config file: ~/.s3cfg" fi if [ "$action" = "delete" ] && ! ${CK8S_AUTO_APPROVE}; then - echo -n -e "[\e[34mck8s\e[0m] Are you sure you want to delete all buckets? (y/n): " 1>&2 - read -r reply - if [[ ! "$reply" =~ ^[yY]$ ]]; then - exit 1 - fi + echo -n -e "[\e[34mck8s\e[0m] Are you sure you want to delete all buckets? (y/n): " 1>&2 + read -r reply + if [[ ! 
"$reply" =~ ^[yY]$ ]]; then + exit 1 + fi fi log_info "Running: $cmd" diff --git a/scripts/S3/generate-s3cfg.sh b/scripts/S3/generate-s3cfg.sh index f21774cc4b..943e1434c1 100755 --- a/scripts/S3/generate-s3cfg.sh +++ b/scripts/S3/generate-s3cfg.sh @@ -6,16 +6,16 @@ set -eu -o pipefail # and tested S3 providers. function usage() { - echo "Usage:" 1>&2 - echo " $0 {aws|exoscale|safespring|citycloud|elastx} {access_key} {secret_key} {host_base} [region]" 1>&2 - echo " host_base - the host (and port if other than default) of the service" 1>&2 - echo " region - the location where the buckets should be stored. This is ignored for exoscale, safespring and citycloud." 1>&2 - echo "Examples:" 1>&2 - echo " $0 aws abc 123 s3.amazonaws.com eu-north-1" 1>&2 - echo " $0 exoscale abc 123 sos-ch-gva-2.exo.io" 1>&2 - echo " $0 safespring abc 123 s3.sto1.safedc.net" 1>&2 - echo " $0 cityloud abc 123 s3-kna1.citycloud.com:8080" 1>&2 - exit 1 + echo "Usage:" 1>&2 + echo " $0 {aws|exoscale|safespring|citycloud|elastx} {access_key} {secret_key} {host_base} [region]" 1>&2 + echo " host_base - the host (and port if other than default) of the service" 1>&2 + echo " region - the location where the buckets should be stored. This is ignored for exoscale, safespring and citycloud." 
1>&2 + echo "Examples:" 1>&2 + echo " $0 aws abc 123 s3.amazonaws.com eu-north-1" 1>&2 + echo " $0 exoscale abc 123 sos-ch-gva-2.exo.io" 1>&2 + echo " $0 safespring abc 123 s3.sto1.safedc.net" 1>&2 + echo " $0 cityloud abc 123 s3-kna1.citycloud.com:8080" 1>&2 + exit 1 } [ "$#" -lt 4 ] && echo "Too few arguments" && usage @@ -25,19 +25,19 @@ access_key = $2 secret_key = $3 EOF if [ "$1" = "aws" ]; then -cat <&2 - echo " $0 [--s3cfg config-path] --create | -c bucket_name_1 [bucket_name_2 ...]" 1>&2 - echo " $0 [--s3cfg config-path] --delete | -d bucket_name_1 [bucket_name_2 ...]" 1>&2 - echo " $0 [--s3cfg config-path] --abort | -a bucket_name_1 [bucket_name_2 ...]" 1>&2 - exit 1 + echo "Usage:" 1>&2 + echo " $0 [--s3cfg config-path] --create | -c bucket_name_1 [bucket_name_2 ...]" 1>&2 + echo " $0 [--s3cfg config-path] --delete | -d bucket_name_1 [bucket_name_2 ...]" 1>&2 + echo " $0 [--s3cfg config-path] --abort | -a bucket_name_1 [bucket_name_2 ...]" 1>&2 + exit 1 } if [ "$1" = "--s3cfg" ]; then - [ "$#" -lt 4 ] && echo "Too few arguments" 1>&2 && usage - s3cmd='s3cmd --config '"${2}" - shift; shift + [ "$#" -lt 4 ] && echo "Too few arguments" 1>&2 && usage + s3cmd='s3cmd --config '"${2}" + shift + shift else - [ "$#" -lt 2 ] && echo "Too few arguments" 1>&2 && usage - s3cmd='s3cmd' + [ "$#" -lt 2 ] && echo "Too few arguments" 1>&2 && usage + s3cmd='s3cmd' fi case "$1" in - -c | --create ) ACTION=$CREATE_ACTION - ;; - -d | --delete ) ACTION=$DELETE_ACTION - ;; - -a | --abort ) ACTION=$ABORT_UPLOAD_ACTION - ;; +-c | --create) + ACTION=$CREATE_ACTION + ;; +-d | --delete) + ACTION=$DELETE_ACTION + ;; +-a | --abort) + ACTION=$ABORT_UPLOAD_ACTION + ;; esac shift buckets="$*" function create_bucket() { # arguments: bucket name - local bucket_name="$1" + local bucket_name="$1" - echo "checking status of bucket [${bucket_name}]" >&2 - BUCKET_EXISTS=$(echo "$S3_BUCKET_LIST" | awk "\$3~/^s3:\/\/${bucket_name}$/ {print \$3}") + echo "checking status of bucket 
[${bucket_name}]" >&2 + BUCKET_EXISTS=$(echo "$S3_BUCKET_LIST" | awk "\$3~/^s3:\/\/${bucket_name}$/ {print \$3}") - if [ "$BUCKET_EXISTS" ]; then - echo "bucket [${bucket_name}] already exists, do nothing" >&2 - else - echo "bucket [${bucket_name}] does not exist, creating it now" >&2 - ${s3cmd} mb "s3://${bucket_name}" - fi + if [ "$BUCKET_EXISTS" ]; then + echo "bucket [${bucket_name}] already exists, do nothing" >&2 + else + echo "bucket [${bucket_name}] does not exist, creating it now" >&2 + ${s3cmd} mb "s3://${bucket_name}" + fi } function delete_bucket() { # arguments: bucket name - local bucket_name="$1" + local bucket_name="$1" - echo "checking status of bucket [${bucket_name}]" >&2 - BUCKET_EXISTS=$(echo "$S3_BUCKET_LIST" | awk "\$3~/^s3:\/\/${bucket_name}$/ {print \$3}") + echo "checking status of bucket [${bucket_name}]" >&2 + BUCKET_EXISTS=$(echo "$S3_BUCKET_LIST" | awk "\$3~/^s3:\/\/${bucket_name}$/ {print \$3}") - if [ "$BUCKET_EXISTS" ]; then - echo "Bucket [${bucket_name}] exists, deleting it now" >&2 - ${s3cmd} rb "s3://${bucket_name}" --force --recursive - else - echo "bucket [${bucket_name}] does not exist, do nothing" >&2 - fi + if [ "$BUCKET_EXISTS" ]; then + echo "Bucket [${bucket_name}] exists, deleting it now" >&2 + ${s3cmd} rb "s3://${bucket_name}" --force --recursive + else + echo "bucket [${bucket_name}] does not exist, do nothing" >&2 + fi } function abort_multipart_uploads() { # arguments: bucket name - local bucket_name="$1" - - echo "checking status of bucket [${bucket_name}]" >&2 - ONGOING_UPLOADS=$(${s3cmd} multipart "s3://${bucket_name}" | \ - awk 'FNR > 2 { print $2 " " $3 }') # header has two lines - - if [ -n "$ONGOING_UPLOADS" ]; then - echo "The are ongoing multipart uploads, aborting them now" - echo "$ONGOING_UPLOADS" | while read -r line ; do - echo "Aborting $line" - ${s3cmd} abortmp "$line" - done - else - echo "No ongoing multipart uploads, do nothing" - fi + local bucket_name="$1" + + echo "checking status of bucket 
[${bucket_name}]" >&2 + ONGOING_UPLOADS=$(${s3cmd} multipart "s3://${bucket_name}" | + awk 'FNR > 2 { print $2 " " $3 }') # header has two lines + + if [ -n "$ONGOING_UPLOADS" ]; then + echo "There are ongoing multipart uploads, aborting them now" + echo "$ONGOING_UPLOADS" | while read -r line; do + echo "Aborting $line" + ${s3cmd} abortmp "$line" + done + else + echo "No ongoing multipart uploads, do nothing" + fi } # get a list of all the S3 buckets S3_BUCKET_LIST=$(${s3cmd} ls) if [[ "$ACTION" == "$CREATE_ACTION" ]]; then - echo 'Create buckets (only if they do not exist)' >&2 - # shellcheck disable=SC2068 - for bucket in ${buckets[@]}; do - create_bucket "$bucket" - done + echo 'Create buckets (only if they do not exist)' >&2 + # shellcheck disable=SC2068 + for bucket in ${buckets[@]}; do + create_bucket "$bucket" + done elif [[ "$ACTION" == "$DELETE_ACTION" ]]; then - echo 'Delete buckets' >&2 - # shellcheck disable=SC2068 - for bucket in ${buckets[@]}; do - delete_bucket "$bucket" - done + echo 'Delete buckets' >&2 + # shellcheck disable=SC2068 + for bucket in ${buckets[@]}; do + delete_bucket "$bucket" + done elif [[ "$ACTION" == "$ABORT_UPLOAD_ACTION" ]]; then - echo 'Abort multipart uploads to buckets' >&2 - # shellcheck disable=SC2068 - for bucket in ${buckets[@]}; do - abort_multipart_uploads "$bucket" - done + echo 'Abort multipart uploads to buckets' >&2 + # shellcheck disable=SC2068 + for bucket in ${buckets[@]}; do + abort_multipart_uploads "$bucket" + done else - echo 'Unknown action - Aborting!' 
>&2 && usage + exit 1 fi diff --git a/scripts/azure/storage-manager.sh b/scripts/azure/storage-manager.sh index 27c900574d..6e5ccbb366 100755 --- a/scripts/azure/storage-manager.sh +++ b/scripts/azure/storage-manager.sh @@ -10,11 +10,11 @@ readonly DELETE_ACTION="delete" readonly LIST_HARBOR_BACKUPS="list-harbor-backups" log_info() { - echo -e "[\e[34mck8s\e[0m] ${*}" 1>&2 + echo -e "[\e[34mck8s\e[0m] ${*}" 1>&2 } log_error() { - echo -e "[\e[31mck8s\e[0m] ${*}" 1>&2 + echo -e "[\e[31mck8s\e[0m] ${*}" 1>&2 } common_default=$(yq4 -o j '.objectStorage // {}' "${CK8S_CONFIG_PATH}/defaults/common-config.yaml") @@ -29,7 +29,7 @@ sc_config=$(echo "${sc_default}" | yq4 eval-all --prettyPrint '. as $item ireduc # shellcheck disable=SC2016 wc_default=$(echo "${common_default}" | yq4 eval-all --prettyPrint '. as $item ireduce ({}; . * $item )' - <(yq4 -o j '.objectStorage // {}' "${CK8S_CONFIG_PATH}/defaults/wc-config.yaml")) # shellcheck disable=SC2016 -wc_config=$(echo "${wc_default}" | yq4 eval-all --prettyPrint '. as $item ireduce ({}; . * $item )' - <(yq4 -o j '.objectStorage // {}' "${CK8S_CONFIG_PATH}/common-config.yaml") <(yq4 -o j '.objectStorage // {}' "${CK8S_CONFIG_PATH}/wc-config.yaml") | yq4 '{"objectStorage":.}' -) +wc_config=$(echo "${wc_default}" | yq4 eval-all --prettyPrint '. as $item ireduce ({}; . 
* $item )' - <(yq4 -o j '.objectStorage // {}' "${CK8S_CONFIG_PATH}/common-config.yaml") <(yq4 -o j '.objectStorage // {}' "${CK8S_CONFIG_PATH}/wc-config.yaml") | yq4 '{"objectStorage":.}' -) objectstorage_type_sc=$(echo "${sc_config}" | yq4 '.objectStorage.type' -) objectstorage_type_wc=$(echo "${wc_config}" | yq4 '.objectStorage.type' -) @@ -38,8 +38,8 @@ objectstorage_type_wc=$(echo "${wc_config}" | yq4 '.objectStorage.type' -) [ "$objectstorage_type_wc" != "azure" ] && log_info "Azure Storage is not enabled in workload cluster" if [ "$objectstorage_type_sc" != "azure" ] && [ "$objectstorage_type_wc" != "azure" ]; then - log_error "Azure Storage is not enabled in either cluster, aborting!" - exit 1 + log_error "Azure Storage is not enabled in either cluster, aborting!" + exit 1 fi [ "$objectstorage_type_sc" = "azure" ] && buckets_sc=$(echo "${sc_config}" | yq4 '.objectStorage.buckets.*' -) @@ -47,14 +47,17 @@ fi RESOURCE_GROUP=$(echo "${common_config}" | yq4 '.objectStorage.azure.resourceGroup') STORAGE_ACCOUNT=$(echo "${common_config}" | yq4 '.objectStorage.azure.storageAccountName') -CONTAINERS=$( { echo "${buckets_sc:-}"; echo "${buckets_wc:-}"; } | sort | uniq | tr '\n' ' ' | sed s'/.$//') +CONTAINERS=$({ + echo "${buckets_sc:-}" + echo "${buckets_wc:-}" +} | sort | uniq | tr '\n' ' ' | sed s'/.$//') log_info "Operating on containers: ${CONTAINERS// /', '}" function usage() { - echo "Usage:" 1>&2 - echo " $0 create" 1>&2 - echo " $0 delete" 1>&2 + echo "Usage:" 1>&2 + echo " $0 create" 1>&2 + echo " $0 delete" 1>&2 } if [ "${#}" -lt 1 ]; then @@ -64,91 +67,91 @@ fi case "$1" in create) - ACTION=$CREATE_ACTION - ;; + ACTION=$CREATE_ACTION + ;; delete) - ACTION=$DELETE_ACTION - ;; + ACTION=$DELETE_ACTION + ;; list-harbor-backups) - ACTION=$LIST_HARBOR_BACKUPS - ;; + ACTION=$LIST_HARBOR_BACKUPS + ;; esac shift function create_resource_group() { - log_info "checking if resource group exists" >&2 - GROUP_EXISTS=$(az group list --query '[].name' | jq --arg group 
"${RESOURCE_GROUP}" '. | index($group)') - if [ "$GROUP_EXISTS" != null ]; then - log_info "resource group [${RESOURCE_GROUP}] already exists" >&2 - log_info "continue using this group ? (y/n)" >&2 - read -r -n 1 cmdinput - case "$cmdinput" in - y|Y) return ;; - *) exit 0 ;; - esac - else - log_info "resource group [${RESOURCE_GROUP}] does not exist, creating it now" >&2 - az group create \ - --name "${RESOURCE_GROUP}" \ - --location "${AZURE_LOCATION}" --only-show-errors - fi + log_info "checking if resource group exists" >&2 + GROUP_EXISTS=$(az group list --query '[].name' | jq --arg group "${RESOURCE_GROUP}" '. | index($group)') + if [ "$GROUP_EXISTS" != null ]; then + log_info "resource group [${RESOURCE_GROUP}] already exists" >&2 + log_info "continue using this group ? (y/n)" >&2 + read -r -n 1 cmdinput + case "$cmdinput" in + y | Y) return ;; + *) exit 0 ;; + esac + else + log_info "resource group [${RESOURCE_GROUP}] does not exist, creating it now" >&2 + az group create \ + --name "${RESOURCE_GROUP}" \ + --location "${AZURE_LOCATION}" --only-show-errors + fi } function create_storage_account() { - log_info "checking storage account availability" >&2 - out=$(az storage account check-name --only-show-errors --name "${STORAGE_ACCOUNT}") - ACCOUNT_AVAILABLE=$(echo "$out" | jq -r .nameAvailable) - REASON=$(echo "$out" | jq -r .reason) - case $ACCOUNT_AVAILABLE in - false) - if [ "$REASON" == "AccountNameInvalid" ]; then - log_info "Account name invalid, must be only contain numbers and lowercase letters" - exit 0 - elif [ "$REASON" == "AlreadyExists" ]; then - log_info "storage account [${STORAGE_ACCOUNT}] already exists" >&2 - log_info "contnue using this account ? 
(y/n)" >&2 - read -r -n 1 cmdinput - case "$cmdinput" in - y) return ;; - n) exit 0 ;; - esac - fi - ;; - true) - log_info "creating storage account ${STORAGE_ACCOUNT}" - az storage account create \ - --name "$STORAGE_ACCOUNT" \ - --resource-group "${RESOURCE_GROUP}" \ - --location swedencentral \ - --sku Standard_RAGRS \ - --kind StorageV2 \ - --allow-blob-public-access false --only-show-errors - ;; - esac + log_info "checking storage account availability" >&2 + out=$(az storage account check-name --only-show-errors --name "${STORAGE_ACCOUNT}") + ACCOUNT_AVAILABLE=$(echo "$out" | jq -r .nameAvailable) + REASON=$(echo "$out" | jq -r .reason) + case $ACCOUNT_AVAILABLE in + false) + if [ "$REASON" == "AccountNameInvalid" ]; then + log_info "Account name invalid, must only contain numbers and lowercase letters" + exit 0 + elif [ "$REASON" == "AlreadyExists" ]; then + log_info "storage account [${STORAGE_ACCOUNT}] already exists" >&2 + log_info "continue using this account ? (y/n)" >&2 + read -r -n 1 cmdinput + case "$cmdinput" in + y) return ;; + n) exit 0 ;; + esac + fi + ;; + true) + log_info "creating storage account ${STORAGE_ACCOUNT}" + az storage account create \ + --name "$STORAGE_ACCOUNT" \ + --resource-group "${RESOURCE_GROUP}" \ + --location swedencentral \ + --sku Standard_RAGRS \ + --kind StorageV2 \ + --allow-blob-public-access false --only-show-errors + ;; + esac } function create_containers() { - CONTAINERS_LIST=$(az storage container list --account-name "$STORAGE_ACCOUNT" --query '[].name' --only-show-errors) - # shellcheck disable=SC2068 - for container in ${CONTAINERS[@]}; do - log_info "checking status of container ${container}" >&2 - CONTAINER_EXISTS=$(echo "$CONTAINERS_LIST" | jq --arg container "${container}" '. 
| index($container)') - if [ "$CONTAINER_EXISTS" != null ]; then - log_info "container ${container} already exists, do nothing" >&2 - else - log_info "container ${container} does not exist, creating it now" >&2 - az storage container create \ - -n "$container" \ - --account-name "$STORAGE_ACCOUNT" --only-show-errors - fi - done + CONTAINERS_LIST=$(az storage container list --account-name "$STORAGE_ACCOUNT" --query '[].name' --only-show-errors) + # shellcheck disable=SC2068 + for container in ${CONTAINERS[@]}; do + log_info "checking status of container ${container}" >&2 + CONTAINER_EXISTS=$(echo "$CONTAINERS_LIST" | jq --arg container "${container}" '. | index($container)') + if [ "$CONTAINER_EXISTS" != null ]; then + log_info "container ${container} already exists, do nothing" >&2 + else + log_info "container ${container} does not exist, creating it now" >&2 + az storage container create \ + -n "$container" \ + --account-name "$STORAGE_ACCOUNT" --only-show-errors + fi + done } function delete_all() { - az group delete --name "${RESOURCE_GROUP}" + az group delete --name "${RESOURCE_GROUP}" } function list_harbor_backups() { @@ -157,21 +160,21 @@ function list_harbor_backups() { } if [[ "$ACTION" == "$CREATE_ACTION" ]]; then - log_info "Creating Resource Group" >&2 - create_resource_group + log_info "Creating Resource Group" >&2 + create_resource_group - log_info "Creating Storage Account" >&2 - create_storage_account + log_info "Creating Storage Account" >&2 + create_storage_account - log_info "Creating Storage Containers" >&2 - create_containers "${CONTAINERS}" + log_info "Creating Storage Containers" >&2 + create_containers "${CONTAINERS}" elif [[ "$ACTION" == "$DELETE_ACTION" ]]; then - log_info "deleting..." >&2 - delete_all + log_info "deleting..." 
>&2 + delete_all elif [[ "$ACTION" == "$LIST_HARBOR_BACKUPS" ]]; then - log_info "Listing harbor backups" >&2 - list_harbor_backups + log_info "Listing harbor backups" >&2 + list_harbor_backups else - log_error 'Unknown action - Aborting!' >&2 && usage - exit 1 + log_error 'Unknown action - Aborting!' >&2 && usage + exit 1 fi diff --git a/scripts/charts.sh b/scripts/charts.sh index bf045bf3bc..b1e25a0344 100755 --- a/scripts/charts.sh +++ b/scripts/charts.sh @@ -15,7 +15,7 @@ out() { if [[ -t 1 ]]; then echo -e "${*}" else - sed -E 's/\\e\[[0-9]+m//g' <<< "${*}" + sed -E 's/\\e\[[0-9]+m//g' <<<"${*}" fi } @@ -31,7 +31,7 @@ run_diff() { chart="${1}" out "${chart}:" - current_version="$(yq4 .version "${CHARTS}/${chart}/Chart.yaml" 2> /dev/null || true)" + current_version="$(yq4 .version "${CHARTS}/${chart}/Chart.yaml" 2>/dev/null || true)" if [[ -z "${current_version}" ]] || [[ "${current_version}" == "null" ]]; then out " state: \e[33mskipped\e[0m - missing" return @@ -42,8 +42,8 @@ run_diff() { requested_version="$(yq4 ".charts.\"${chart}\"" "${INDEX}")" fi - error="$(helm show chart "${chart}" --version "${requested_version}" 2>&1 > /dev/null || true)" - error="$(grep "Error" <<< "${error}" || true)" + error="$(helm show chart "${chart}" --version "${requested_version}" 2>&1 >/dev/null || true)" + error="$(grep "Error" <<<"${error}" || true)" if [[ "${error}" =~ ^Error: ]]; then out " state: \e[31mfailure\e[0m - ${error##Error: }" return @@ -57,15 +57,15 @@ run_diff() { ;; all) rm -rf "/tmp/charts/${chart}" - helm pull "${chart}" --version "${requested_version}" 2>&1 > /dev/null --untar --untardir "/tmp/charts/${chart%%/*}" 2> /dev/null + helm pull "${chart}" --version "${requested_version}" --untar --untardir "/tmp/charts/${chart%%/*}" &>/dev/null if diff -r --color -U3 "${CHARTS}/${chart}" "/tmp/charts/${chart}"; then out " state: \e[32mvalid\e[0m" RETURN="1" fi rm -rf "/tmp/charts/${chart}" ;; - chart|crds|readme|values) - if diff --color -U3 --label "current: 
${chart} - ${current_version}" <(helm show "${part}" "${CHARTS}/${chart}" 2> /dev/null) --label "requested: ${chart} - ${requested_version}" <(helm show "${part}" "${chart}" --version "${requested_version}" 2> /dev/null); then + chart | crds | readme | values) + if diff --color -U3 --label "current: ${chart} - ${current_version}" <(helm show "${part}" "${CHARTS}/${chart}" 2>/dev/null) --label "requested: ${chart} - ${requested_version}" <(helm show "${part}" "${chart}" --version "${requested_version}" 2>/dev/null); then out " state: \e[32mvalid\e[0m" RETURN="1" fi @@ -83,11 +83,11 @@ run_list() { requested_version="$(yq4 ".charts.\"${chart}\"" "${INDEX}")" - current_version="$(yq4 '.version + " - " + .appVersion' "${CHARTS}/${chart}/Chart.yaml" 2> /dev/null || true)" + current_version="$(yq4 '.version + " - " + .appVersion' "${CHARTS}/${chart}/Chart.yaml" 2>/dev/null || true)" current_appversion="${current_version#* - }" current_version="${current_version%% - *}" - latest_version="$(helm show chart "${chart}" 2> /dev/null | yq4 '.version + " - " + .appVersion' || true)" + latest_version="$(helm show chart "${chart}" 2>/dev/null | yq4 '.version + " - " + .appVersion' || true)" latest_appversion="${latest_version#* - }" latest_version="${latest_version%% - *}" @@ -121,7 +121,7 @@ run_pull() { requested_version="$(yq4 ".charts.\"${chart}\"" "${INDEX}")" out " requested-version: \e[34m${requested_version}\e[0m" - current_version="$(yq4 .version "${CHARTS}/${chart}/Chart.yaml" 2> /dev/null || true)" + current_version="$(yq4 .version "${CHARTS}/${chart}/Chart.yaml" 2>/dev/null || true)" if [[ "${current_version}" == "${requested_version}" ]] && [[ "${2:-}" != "--force" ]]; then out " state: \e[33mskipped\e[0m - up to date" return @@ -133,8 +133,8 @@ run_pull() { mkdir -p "${CHARTS}/tmp" - error="$(helm pull "${chart}" --version "${requested_version}" --untar --untardir "${CHARTS}/tmp/${chart%%/*}" 2>&1 > /dev/null || true)" - error="$(grep "Error" <<< "${error}" || 
true)" + error="$(helm pull "${chart}" --version "${requested_version}" --untar --untardir "${CHARTS}/tmp/${chart%%/*}" 2>&1 >/dev/null || true)" + error="$(grep "Error" <<<"${error}" || true)" if [[ -z "${error}" ]]; then rm -rf "${CHARTS:?}/${chart:?}" mkdir -p "${CHARTS}/${chart%%/*}" @@ -152,14 +152,14 @@ run_verify() { chart="${1}" out "${chart}:" - current_version="$(yq4 .version "${CHARTS}/${chart}/Chart.yaml" 2> /dev/null || true)" + current_version="$(yq4 .version "${CHARTS}/${chart}/Chart.yaml" 2>/dev/null || true)" if [[ -z "${current_version}" ]] || [[ "${current_version}" == "null" ]]; then out " state: \e[33mskipped\e[0m - missing" return fi - error="$(helm show chart "${chart}" --version "${current_version}" 2>&1 > /dev/null || true)" - error="$(grep "Error" <<< "${error}" || true)" + error="$(helm show chart "${chart}" --version "${current_version}" 2>&1 >/dev/null || true)" + error="$(grep "Error" <<<"${error}" || true)" if [[ "${error}" =~ ^Error: ]]; then out " state: \e[31mfailure\e[0m - ${error##Error: }" RETURN="1" @@ -168,7 +168,7 @@ run_verify() { rm -rf "/tmp/charts/${chart}" - helm pull "${chart}" --version "${current_version}" --untar --untardir "/tmp/charts/${chart%%/*}" 2> /dev/null + helm pull "${chart}" --version "${current_version}" --untar --untardir "/tmp/charts/${chart%%/*}" 2>/dev/null remote="$(find "/tmp/charts/${chart}" -type f -exec sha256sum {} + | awk '{print $1}' | sort | sha256sum)" local="$(find "${CHARTS}/${chart}" -type f -exec sha256sum {} + | awk '{print $1}' | sort | sha256sum)" @@ -196,7 +196,7 @@ usage() { } case "${1:-}" in -diff|list|pull|verify) +diff | list | pull | verify) case "${2:-}" in "") echo "error: ${1}: missing argument" diff --git a/scripts/clean-sc.sh b/scripts/clean-sc.sh index 1f70f7baf9..274f177cb3 100755 --- a/scripts/clean-sc.sh +++ b/scripts/clean-sc.sh @@ -21,16 +21,16 @@ config_load sc clusterAPIEnabled=$(yq4 '.clusterApi.enabled' "${config[config_file_sc]}") GATE_VALWEBHOOK=$( - 
"${here}/.././bin/ck8s" ops \ - kubectl sc get \ - validatingwebhookconfigurations \ - -l gatekeeper.sh/system=yes \ - -oname - ) + "${here}/.././bin/ck8s" ops \ + kubectl sc get \ + validatingwebhookconfigurations \ + -l gatekeeper.sh/system=yes \ + -oname +) if [ -n "${GATE_VALWEBHOOK}" ]; then - # Destroy gatekeeper validatingwebhook which could potentially prevent other resources from being deleted - "${here}/.././bin/ck8s" ops kubectl sc delete "${GATE_VALWEBHOOK}" + # Destroy gatekeeper validatingwebhook which could potentially prevent other resources from being deleted + "${here}/.././bin/ck8s" ops kubectl sc delete "${GATE_VALWEBHOOK}" fi # Makes sure to uninstall Velero properly: https://velero.io/docs/v1.13/uninstalling/ @@ -44,14 +44,14 @@ fi # Clean up any leftover challenges mapfile -t CHALLENGES < <( - "${here}/.././bin/ck8s" ops kubectl sc get challenge -A -oyaml | \ - yq4 '.items[] | .metadata.name + "," + .metadata.namespace' - ) + "${here}/.././bin/ck8s" ops kubectl sc get challenge -A -oyaml | + yq4 '.items[] | .metadata.name + "," + .metadata.namespace' +) for challenge in "${CHALLENGES[@]}"; do - IFS=, read -r name namespace <<< "${challenge}" - "${here}/.././bin/ck8s" ops \ - kubectl sc patch challenge "${name}" -n "${namespace}" \ - -p '{"metadata":{"finalizers":null}}' --type=merge + IFS=, read -r name namespace <<<"${challenge}" + "${here}/.././bin/ck8s" ops \ + kubectl sc patch challenge "${name}" -n "${namespace}" \ + -p '{"metadata":{"finalizers":null}}' --type=merge done if [ "${clusterAPIEnabled}" = "false" ]; then @@ -81,56 +81,56 @@ fi # Dex specific removal # Keep for now, we won't use Dex CRDs in the future "${here}/.././bin/ck8s" ops kubectl sc delete crds \ - authcodes.dex.coreos.com \ - authrequests.dex.coreos.com \ - connectors.dex.coreos.com \ - oauth2clients.dex.coreos.com \ - offlinesessionses.dex.coreos.com \ - passwords.dex.coreos.com \ - refreshtokens.dex.coreos.com \ - signingkeies.dex.coreos.com \ - 
devicerequests.dex.coreos.com \ - devicetokens.dex.coreos.com + authcodes.dex.coreos.com \ + authrequests.dex.coreos.com \ + connectors.dex.coreos.com \ + oauth2clients.dex.coreos.com \ + offlinesessionses.dex.coreos.com \ + passwords.dex.coreos.com \ + refreshtokens.dex.coreos.com \ + signingkeies.dex.coreos.com \ + devicerequests.dex.coreos.com \ + devicetokens.dex.coreos.com # Prometheus specific removal PROM_CRDS=$( - "${here}/.././bin/ck8s" ops \ - kubectl sc api-resources \ - --api-group=monitoring.coreos.com \ - -o name - ) + "${here}/.././bin/ck8s" ops \ + kubectl sc api-resources \ + --api-group=monitoring.coreos.com \ + -o name +) if [ -n "$PROM_CRDS" ]; then - # shellcheck disable=SC2086 - # We definitely want word splitting here. - "${here}/.././bin/ck8s" ops kubectl sc delete crds $PROM_CRDS + # shellcheck disable=SC2086 + # We definitely want word splitting here. + "${here}/.././bin/ck8s" ops kubectl sc delete crds $PROM_CRDS fi # Trivy specific removal TRIVY_CRDS=$( - "${here}/.././bin/ck8s" ops \ - kubectl sc api-resources \ - --api-group=aquasecurity.github.io \ - -o name - ) + "${here}/.././bin/ck8s" ops \ + kubectl sc api-resources \ + --api-group=aquasecurity.github.io \ + -o name +) # Delete CRDs if [ -n "$TRIVY_CRDS" ]; then - # shellcheck disable=SC2086 - # We definitely want word splitting here. - "${here}/.././bin/ck8s" ops kubectl sc delete crds $TRIVY_CRDS + # shellcheck disable=SC2086 + # We definitely want word splitting here. 
+ "${here}/.././bin/ck8s" ops kubectl sc delete crds $TRIVY_CRDS fi # Delete Gatekeeper CRDs GATE_CRDS=$("${here}/.././bin/ck8s" ops kubectl sc get crds -l gatekeeper.sh/system=yes -oname) if [ -n "$GATE_CRDS" ]; then - # shellcheck disable=SC2086 - "${here}/.././bin/ck8s" ops kubectl sc delete --ignore-not-found=true $GATE_CRDS + # shellcheck disable=SC2086 + "${here}/.././bin/ck8s" ops kubectl sc delete --ignore-not-found=true $GATE_CRDS fi GATE_CONS=$("${here}/.././bin/ck8s" ops kubectl sc get crds -l gatekeeper.sh/constraint=yes -oname) if [ -n "$GATE_CONS" ]; then - # shellcheck disable=SC2086 - "${here}/.././bin/ck8s" ops kubectl sc delete --ignore-not-found=true $GATE_CONS + # shellcheck disable=SC2086 + "${here}/.././bin/ck8s" ops kubectl sc delete --ignore-not-found=true $GATE_CONS fi "${here}/.././bin/ck8s" ops helmfile sc -l app=admin-rbac destroy diff --git a/scripts/clean-wc.sh b/scripts/clean-wc.sh index 524442012a..07c75423b0 100755 --- a/scripts/clean-wc.sh +++ b/scripts/clean-wc.sh @@ -9,7 +9,7 @@ echo -e "Your current \u1b[33mCK8S_CONFIG_PATH\033[m is set to: \u1b[33;4m${CK8S echo -n "Do you want to continue (y/N): " read -r reply if [[ ${reply} != "y" ]]; then - exit 1 + exit 1 fi here="$(dirname "$(readlink -f "$0")")" @@ -22,16 +22,16 @@ config_load wc clusterAPIEnabled=$(yq4 '.clusterApi.enabled' "${config[config_file_wc]}") GATE_VALWEBHOOK=$( - "${here}/.././bin/ck8s" ops \ - kubectl wc get \ - validatingwebhookconfigurations \ - -l gatekeeper.sh/system=yes \ - -oname - ) + "${here}/.././bin/ck8s" ops \ + kubectl wc get \ + validatingwebhookconfigurations \ + -l gatekeeper.sh/system=yes \ + -oname +) if [ -n "${GATE_VALWEBHOOK}" ]; then - # Destroy gatekeeper validatingwebhook which could potentially prevent other resources from being deleted - "${here}/.././bin/ck8s" ops kubectl wc delete "${GATE_VALWEBHOOK}" + # Destroy gatekeeper validatingwebhook which could potentially prevent other resources from being deleted + "${here}/.././bin/ck8s" 
ops kubectl wc delete "${GATE_VALWEBHOOK}" fi # Destroy user subnamespaces before their parent namespaces, @@ -56,14 +56,14 @@ fi # Clean up any leftover challenges mapfile -t CHALLENGES < <( - "${here}/.././bin/ck8s" ops kubectl wc get challenge -A -oyaml | \ - yq4 '.items[] | .metadata.name + "," + .metadata.namespace' - ) + "${here}/.././bin/ck8s" ops kubectl wc get challenge -A -oyaml | + yq4 '.items[] | .metadata.name + "," + .metadata.namespace' +) for challenge in "${CHALLENGES[@]}"; do - IFS=, read -r name namespace <<< "${challenge}" - "${here}/.././bin/ck8s" ops \ - kubectl wc patch challenge "${name}" -n "${namespace}" \ - -p '{"metadata":{"finalizers":null}}' --type=merge + IFS=, read -r name namespace <<<"${challenge}" + "${here}/.././bin/ck8s" ops \ + kubectl wc patch challenge "${name}" -n "${namespace}" \ + -p '{"metadata":{"finalizers":null}}' --type=merge done if [ "${clusterAPIEnabled}" = "false" ]; then @@ -93,32 +93,32 @@ fi # Delete kube prometheus stack CRDs mapfile -t PROM_CRDS < <("${here}/.././bin/ck8s" ops kubectl wc api-resources --api-group=monitoring.coreos.com -o name) if [[ "${#PROM_CRDS[@]}" -gt 0 ]]; then - "${here}/.././bin/ck8s" ops kubectl wc delete crds "${PROM_CRDS[@]}" + "${here}/.././bin/ck8s" ops kubectl wc delete crds "${PROM_CRDS[@]}" fi # Delete Trivy CRDs mapfile -t TRIVY_CRDS < <("${here}/.././bin/ck8s" ops kubectl wc api-resources --api-group=aquasecurity.github.io -o name) if [[ "${#TRIVY_CRDS[@]}" -gt 0 ]]; then - "${here}/.././bin/ck8s" ops kubectl wc delete crds "${TRIVY_CRDS[@]}" + "${here}/.././bin/ck8s" ops kubectl wc delete crds "${TRIVY_CRDS[@]}" fi # Delete Gatekeeper CRDs GATE_CRDS=$("${here}/.././bin/ck8s" ops kubectl wc get crds -l gatekeeper.sh/system=yes -oname) if [ -n "$GATE_CRDS" ]; then - # shellcheck disable=SC2086 - "${here}/.././bin/ck8s" ops kubectl wc delete --ignore-not-found=true $GATE_CRDS + # shellcheck disable=SC2086 + "${here}/.././bin/ck8s" ops kubectl wc delete --ignore-not-found=true 
$GATE_CRDS fi GATE_CONS=$("${here}/.././bin/ck8s" ops kubectl wc get crds -l gatekeeper.sh/constraint=yes -oname) if [ -n "$GATE_CONS" ]; then - # shellcheck disable=SC2086 - "${here}/.././bin/ck8s" ops kubectl wc delete --ignore-not-found=true $GATE_CONS + # shellcheck disable=SC2086 + "${here}/.././bin/ck8s" ops kubectl wc delete --ignore-not-found=true $GATE_CONS fi # Delete HNC CRDs mapfile -t HNC_CRDS < <("${here}/.././bin/ck8s" ops kubectl wc api-resources --api-group=hnc.x-k8s.io -o name) if [[ "${#HNC_CRDS[@]}" -gt 0 ]]; then - "${here}/.././bin/ck8s" ops kubectl wc delete crds "${HNC_CRDS[@]}" + "${here}/.././bin/ck8s" ops kubectl wc delete crds "${HNC_CRDS[@]}" fi "${here}/.././bin/ck8s" ops helmfile wc -l app=admin-rbac destroy diff --git a/scripts/local-cluster.sh b/scripts/local-cluster.sh index 6d4e7e4ab7..36a3d2ef8b 100755 --- a/scripts/local-cluster.sh +++ b/scripts/local-cluster.sh @@ -100,11 +100,14 @@ index.state() { case "${state}" in "") - yq ".\"${cluster}\"" "${CK8S_CONFIG_PATH}/cluster-index.yaml" ;; + yq ".\"${cluster}\"" "${CK8S_CONFIG_PATH}/cluster-index.yaml" + ;; "delete") - yq -i "del(.\"${cluster}\")" "${CK8S_CONFIG_PATH}/cluster-index.yaml" ;; + yq -i "del(.\"${cluster}\")" "${CK8S_CONFIG_PATH}/cluster-index.yaml" + ;; *) - yq -i ".\"${cluster}\" = \"${state}\"" "${CK8S_CONFIG_PATH}/cluster-index.yaml" ;; + yq -i ".\"${cluster}\" = \"${state}\"" "${CK8S_CONFIG_PATH}/cluster-index.yaml" + ;; esac } @@ -164,14 +167,14 @@ cache() { fi local -a registryfiles - readarray -t registryfiles <<< "$(find "${HERE}/local-clusters/registries/" -type f)" + readarray -t registryfiles <<<"$(find "${HERE}/local-clusters/registries/" -type f)" for registryfile in "${registryfiles[@]}"; do local downstream name upstream downstream="$(yq -oy '.host | keys | .[0]' "${registryfile}")" - name="$(sed -e 's#http://##' -e 's#:.*##' <<< "${downstream}")" + name="$(sed -e 's#http://##' -e 's#:.*##' <<<"${downstream}")" upstream="$(yq -oy '.host | keys | .[1]' 
"${registryfile}")" @@ -354,13 +357,13 @@ create() { if [[ "$(index.state "${cluster}")" == "creating" ]]; then log.info "kind create cluster \"${cluster}\" using \"${config}\"" - kind create cluster --name "${cluster}" --config /dev/stdin <<< "$(envsubst < "${config}")" + kind create cluster --name "${cluster}" --config /dev/stdin <<<"$(envsubst <"${config}")" index.state "${cluster}" "configuring" fi mkdir -p "${CK8S_CONFIG_PATH}/.state" - kind get kubeconfig --name "${cluster}" > "${CK8S_CONFIG_PATH}/.state/kube_config_sc.yaml" - kind get kubeconfig --name "${cluster}" > "${CK8S_CONFIG_PATH}/.state/kube_config_wc.yaml" + kind get kubeconfig --name "${cluster}" >"${CK8S_CONFIG_PATH}/.state/kube_config_sc.yaml" + kind get kubeconfig --name "${cluster}" >"${CK8S_CONFIG_PATH}/.state/kube_config_wc.yaml" chmod 600 "${CK8S_CONFIG_PATH}/.state/kube_config_sc.yaml" chmod 600 "${CK8S_CONFIG_PATH}/.state/kube_config_wc.yaml" @@ -441,38 +444,49 @@ main() { case "${command}" in cache) case "${subcommand}" in - create|delete) - cache "${subcommand}" ;; + create | delete) + cache "${subcommand}" + ;; *) - log.usage ;; + log.usage + ;; esac ;; resolve) case "${subcommand}" in - create|delete) - resolve "${subcommand}" "${@:3}" ;; + create | delete) + resolve "${subcommand}" "${@:3}" + ;; *) - log.usage ;; + log.usage + ;; esac ;; config) - config "${@:2}" ;; + config "${@:2}" + ;; create) - create "${@:2}" ;; + create "${@:2}" + ;; delete) - delete "${@:2}" ;; + delete "${@:2}" + ;; list) case "${subcommand}" in - clusters|cluster) - list.clusters ;; - profiles|profile) - list.profiles ;; - *) - log.usage ;; + clusters | cluster) + list.clusters + ;; + profiles | profile) + list.profiles + ;; + *) + log.usage + ;; esac ;; *) - log.usage ;; + log.usage + ;; esac } diff --git a/scripts/migration/helm.sh b/scripts/migration/helm.sh index 5c268c2c20..74dffc019b 100644 --- a/scripts/migration/helm.sh +++ b/scripts/migration/helm.sh @@ -17,7 +17,7 @@ helm_chart_name() { log_fatal 
"usage: helm_chart_name " fi - helm_do "${1}" list -n "${2}" -oyaml 2> /dev/null | yq4 ".[] | select(.name == \"${3}\") | .chart | sub(\"-\d+\.\d+\.\d+$\", \"\")" + helm_do "${1}" list -n "${2}" -oyaml 2>/dev/null | yq4 ".[] | select(.name == \"${3}\") | .chart | sub(\"-\d+\.\d+\.\d+$\", \"\")" } helm_chart_version() { @@ -25,7 +25,7 @@ helm_chart_version() { log_fatal "usage: helm_chart_version " fi - helm_do "${1}" list -n "${2}" -oyaml 2> /dev/null | yq4 ".[] | select(.name == \"${3}\") | .chart | match(\"\d+\.\d+\.\d+\") | .string" + helm_do "${1}" list -n "${2}" -oyaml 2>/dev/null | yq4 ".[] | select(.name == \"${3}\") | .chart | match(\"\d+\.\d+\.\d+\") | .string" } helm_installed() { @@ -33,7 +33,7 @@ helm_installed() { log_fatal "usage: helm_installed " fi - helm_do "${1}" status -n "${2}" "${3}" > /dev/null 2>&1 + helm_do "${1}" status -n "${2}" "${3}" >/dev/null 2>&1 } helm_rollback() { diff --git a/scripts/migration/helmfile.sh b/scripts/migration/helmfile.sh index 6b1e37972e..1857f83064 100644 --- a/scripts/migration/helmfile.sh +++ b/scripts/migration/helmfile.sh @@ -74,19 +74,19 @@ helmfile_change_dispatch() { fi local list - if ! list="$(helmfile_list "${2}" "${@:3}" 2> /dev/null)"; then - log_warn "warning: ${2} ${*:3} had no matching releases" - return + if ! 
list="$(helmfile_list "${2}" "${@:3}" 2>/dev/null)"; then + log_warn "warning: ${2} ${*:3} had no matching releases" + return fi - list="$(yq4 -P '[.[] | select(.enabled and .installed)]' <<< "${list}")" + list="$(yq4 -P '[.[] | select(.enabled and .installed)]' <<<"${list}")" local length - length="$(yq4 -P 'length' <<< "${list}")" + length="$(yq4 -P 'length' <<<"${list}")" for index in $(seq 0 $((length - 1))); do - namespace="$(yq4 -P ".[${index}].namespace" <<< "${list}")" - name="$(yq4 -P ".[${index}].name" <<< "${list}")" + namespace="$(yq4 -P ".[${index}].namespace" <<<"${list}")" + name="$(yq4 -P ".[${index}].name" <<<"${list}")" - if helmfile_change "${2}" "namespace=${namespace},name=${name}" > /dev/null 2>&1; then + if helmfile_change "${2}" "namespace=${namespace},name=${name}" >/dev/null 2>&1; then log_info " - skipping ${2} ${namespace}/${name} no change" continue fi diff --git a/scripts/migration/lib.sh b/scripts/migration/lib.sh index 9d3ad09adb..fdf90ee42f 100644 --- a/scripts/migration/lib.sh +++ b/scripts/migration/lib.sh @@ -64,7 +64,7 @@ log_fatal() { # --- git version git_version() { - git -C "${ROOT}" describe --exact-match --tags 2> /dev/null || git -C "${ROOT}" rev-parse HEAD + git -C "${ROOT}" describe --exact-match --tags 2>/dev/null || git -C "${ROOT}" rev-parse HEAD } # --- config functions --- @@ -78,7 +78,7 @@ config_version() { local prefix="${1}" local version - version="$(yq4 ".global.ck8sVersion" <<< "${CONFIG["${prefix}"]}")" + version="$(yq4 ".global.ck8sVersion" <<<"${CONFIG["${prefix}"]}")" VERSION["${prefix}-config"]="${version}" version="${version#v}" @@ -108,40 +108,40 @@ config_validate() { done ;; - sc|wc) + sc | wc) log_info "validating ${1}-config" defaults="$(yq_merge "${CK8S_CONFIG_PATH}/defaults/common-config.yaml" "${CK8S_CONFIG_PATH}/defaults/${1}-config.yaml")" - setmes="$(yq_paths "set-me" <<< "${defaults}")" - conditional_setmes="$(yq_paths "set-me-if-*" <<< "${defaults}")" + setmes="$(yq_paths "set-me" 
<<<"${defaults}")" + conditional_setmes="$(yq_paths "set-me-if-*" <<<"${defaults}")" for setme in ${setmes}; do - compare=$(diff <(yq4 -oj "${setme}" <<< "${defaults}") <(yq4 -oj "${setme}" <<< "${CONFIG["${1}"]}") || true) + compare=$(diff <(yq4 -oj "${setme}" <<<"${defaults}") <(yq4 -oj "${setme}" <<<"${CONFIG["${1}"]}") || true) if [[ -z "${compare}" ]]; then - log_error "error: \"${setme//\"/}\" is unset in ${1}-config" - pass="false" + log_error "error: \"${setme//\"/}\" is unset in ${1}-config" + pass="false" fi done for condsetme in ${conditional_setmes}; do - required_condition=$(yq4 "${condsetme}" <<< "${defaults}" | sed -rn 's/set-me-if-(.*)/\1/p' | yq4 "[.] | flatten | .[0]") - if [[ $(yq4 "${required_condition}" <<< "${CONFIG["${1}"]}") == "true" ]]; then - compare=$(diff <(yq4 -oj "${condsetme}" <<< "${defaults}") <(yq4 -oj "${condsetme}" <<< "${CONFIG["${1}"]}") || true) + required_condition=$(yq4 "${condsetme}" <<<"${defaults}" | sed -rn 's/set-me-if-(.*)/\1/p' | yq4 "[.] | flatten | .[0]") + if [[ $(yq4 "${required_condition}" <<<"${CONFIG["${1}"]}") == "true" ]]; then + compare=$(diff <(yq4 -oj "${condsetme}" <<<"${defaults}") <(yq4 -oj "${condsetme}" <<<"${CONFIG["${1}"]}") || true) if [[ -z "${compare}" ]]; then - log_error "error: \"${condsetme//\"/}\" is unset in ${1}-config" - pass="false" + log_error "error: \"${condsetme//\"/}\" is unset in ${1}-config" + pass="false" fi fi done - sync_enabled=$(yq4 '.objectStorage.sync.enabled' <<< "${CONFIG["${1}"]}") - sync_default_enabled=$(yq4 '.objectStorage.sync.syncDefaultBuckets' <<< "${CONFIG["${1}"]}") + sync_enabled=$(yq4 '.objectStorage.sync.enabled' <<<"${CONFIG["${1}"]}") + sync_default_enabled=$(yq4 '.objectStorage.sync.syncDefaultBuckets' <<<"${CONFIG["${1}"]}") if [[ "${1}" = "sc" ]] && [[ "${sync_enabled}" = "true" ]] && [[ "${sync_default_enabled}" = "true" ]]; then log_info "checking sync swift" - check_harbor="$(yq4 '.harbor.persistence.type' <<< "${CONFIG["${1}"]}")" - 
check_thanos="$(yq4 '.thanos.objectStorage.type' <<< "${CONFIG["${1}"]}")" - check_sync_swift="$(yq4 '.objectStorage.sync.swift' <<< "${CONFIG["${1}"]}")" + check_harbor="$(yq4 '.harbor.persistence.type' <<<"${CONFIG["${1}"]}")" + check_thanos="$(yq4 '.thanos.objectStorage.type' <<<"${CONFIG["${1}"]}")" + check_sync_swift="$(yq4 '.objectStorage.sync.swift' <<<"${CONFIG["${1}"]}")" if { [[ "${check_harbor}" = "swift" ]] || [[ "${check_thanos}" = "swift" ]]; } && [[ "${check_sync_swift}" = "null" ]]; then log_error "error: swift is enabled for Harbor/Thanos, but .objectStorage.sync is missing swift configuration" @@ -159,7 +159,7 @@ config_validate() { log_warn_no_newline "config validation failed do you still want to continue? [y/N]: " read -r reply if [[ "${reply}" != "y" ]]; then - exit 1 + exit 1 fi else exit 1 @@ -231,8 +231,8 @@ check_config() { fi if ! check_sops "${CK8S_CONFIG_PATH}/secrets.yaml"; then - log_error "error: \"secrets.yaml\" is not encrypted" - pass="false" + log_error "error: \"secrets.yaml\" is not encrypted" + pass="false" fi if [[ "${pass}" = "false" ]]; then @@ -346,25 +346,25 @@ fi # Normally a signal handler can only run one command. Use this to be able to # add multiple traps for a single signal. 
append_trap() { - cmd="${1}" - signal="${2}" + cmd="${1}" + signal="${2}" - if [ "$(trap -p "${signal}")" = "" ]; then - # shellcheck disable=SC2064 - trap "${cmd}" "${signal}" - return - fi + if [ "$(trap -p "${signal}")" = "" ]; then + # shellcheck disable=SC2064 + trap "${cmd}" "${signal}" + return + fi - # shellcheck disable=SC2317 - previous_trap_cmd() { printf '%s\n' "$3"; } + # shellcheck disable=SC2317 + previous_trap_cmd() { printf '%s\n' "$3"; } - new_trap() { - eval "previous_trap_cmd $(trap -p "${signal}")" - printf '%s\n' "${cmd}" - } + new_trap() { + eval "previous_trap_cmd $(trap -p "${signal}")" + printf '%s\n' "${cmd}" + } - # shellcheck disable=SC2064 - trap "$(new_trap)" "${signal}" + # shellcheck disable=SC2064 + trap "$(new_trap)" "${signal}" } # shellcheck source=scripts/migration/helm.sh diff --git a/scripts/migration/yq.sh b/scripts/migration/yq.sh index 706050eba4..e47a7ee701 100644 --- a/scripts/migration/yq.sh +++ b/scripts/migration/yq.sh @@ -55,7 +55,7 @@ yq_move_to_file() { if ! 
yq_null "${1}" "${2}"; then log_info " - move: ${1} ${2} to ${4} ${3}" yq4 -oj -I0 "${2}" "${CK8S_CONFIG_PATH}/${1}-config.yaml" | - yq4 -i "${4} = load(\"/dev/stdin\")" "${CK8S_CONFIG_PATH}/${3}-config.yaml" + yq4 -i "${4} = load(\"/dev/stdin\")" "${CK8S_CONFIG_PATH}/${3}-config.yaml" yq4 -i "del(${2})" "${CK8S_CONFIG_PATH}/${1}-config.yaml" fi } diff --git a/scripts/report/score.sh b/scripts/report/score.sh index 6aba66305b..907eea82b0 100755 --- a/scripts/report/score.sh +++ b/scripts/report/score.sh @@ -5,9 +5,9 @@ set -euo pipefail here="$(dirname "$(readlink -f "${0}")")" root="$(dirname "$(dirname "${here}")")" -if command -v kube-score &> /dev/null; then +if command -v kube-score &>/dev/null; then cmd="kube-score" -elif command -v docker &> /dev/null; then +elif command -v docker &>/dev/null; then echo "warning: kube-score (https://github.com/zegl/kube-score) is not installed, using docker (docker.io/zegl/kube-score:latest)" >&2 cmd="docker run -i --rm docker.io/zegl/kube-score:latest" else @@ -22,15 +22,15 @@ score() { shift local releases - releases="$(helmfile --allow-no-matching-release -e "${target}_cluster" -f "${root}/helmfile.d/" list "${@/#/-l}" -q --output json)" - releases="$(yq4 -Poj '[.[] | select(.enabled and .installed) | {"namespace": .namespace, "name": .name}] | sort_by(.namespace, .name)' <<< "${releases}")" + releases="$(helmfile --allow-no-matching-release -e "${target}_cluster" -f "${root}/helmfile.d/" list "${@/#/-l}" -q --output json)" + releases="$(yq4 -Poj '[.[] | select(.enabled and .installed) | {"namespace": .namespace, "name": .name}] | sort_by(.namespace, .name)' <<<"${releases}")" local length - length="$(yq4 -Poy 'length' <<< "${releases}")" + length="$(yq4 -Poy 'length' <<<"${releases}")" for index in $(seq 0 $((length - 1))); do - namespace="$(yq4 -Poy ".[${index}].namespace" <<< "${releases}")" - name="$(yq4 -Poy ".[${index}].name" <<< "${releases}")" + namespace="$(yq4 -Poy ".[${index}].namespace" <<<"${releases}")" + 
name="$(yq4 -Poy ".[${index}].name" <<<"${releases}")" echo "templating ${target}/${namespace}/${name}" >&2 helmfile -e "${target}_cluster" -f "${root}/helmfile.d/" template -q "-lnamespace=${namespace},name=${name}" | yq4 "with(select(.metadata.namespace == null); .metadata.namespace = \"${namespace}\")" diff --git a/scripts/run-from-container.sh b/scripts/run-from-container.sh index 006c6cdd90..b9339ecaac 100755 --- a/scripts/run-from-container.sh +++ b/scripts/run-from-container.sh @@ -45,7 +45,7 @@ log.continue() { } yq() { - if command -v yq4 > /dev/null; then + if command -v yq4 >/dev/null; then command yq4 "${@}" else command yq "${@}" diff --git a/scripts/validate-pull-request.sh b/scripts/validate-pull-request.sh index ff4c7c8370..4bf4dfb06c 100755 --- a/scripts/validate-pull-request.sh +++ b/scripts/validate-pull-request.sh @@ -8,7 +8,7 @@ declare -a output # Raw input stage declare -a raw if [[ -f "${1:-}" ]]; then - readarray -t raw < "${1}" + readarray -t raw <"${1}" else echo "error: missing or invalid file argument" >&2 exit 1 @@ -79,7 +79,6 @@ for line in "${raw[@]}"; do done done - # Find kinds declare -a kinds for line in "${input[@]}"; do @@ -209,9 +208,9 @@ done # Output stage: Custom annotations for GitHub Actions and regular error output otherwise if [[ -n "${output[*]:-}" ]]; then if [[ -n "${GITHUB_ACTIONS:-}" ]]; then - echo "pull request failed validation:" >> "${GITHUB_STEP_SUMMARY:-}" + echo "pull request failed validation:" >>"${GITHUB_STEP_SUMMARY:-}" for line in "${output[@]}"; do - echo "- ${line}" >> "${GITHUB_STEP_SUMMARY:-}" + echo "- ${line}" >>"${GITHUB_STEP_SUMMARY:-}" echo "::error ::${line}" done else diff --git a/tests/bats.lib.bash b/tests/bats.lib.bash index c29c963e50..530365e035 100644 --- a/tests/bats.lib.bash +++ b/tests/bats.lib.bash @@ -57,7 +57,7 @@ mark.check() { # punch a marker for serial tests mark.punch() { if [[ -n "${TESTS_MARKER:-}" ]]; then - echo "pass" > "${TESTS_MARKER}" + echo "pass" >"${TESTS_MARKER}" 
else fail "cannot punch missing marker" fi @@ -212,24 +212,24 @@ cypress_setup() { for seq in $(seq 3); do [[ "${seq}" == "1" ]] || log.trace "cypress run: try ${seq}/3" - cypress run --no-runner-ui --spec "$1" --reporter json-stream --quiet > "${CYPRESS_REPORT}" || true + cypress run --no-runner-ui --spec "$1" --reporter json-stream --quiet >"${CYPRESS_REPORT}" || true # This happen seemingly at random if ! grep "Fatal JavaScript out of memory" "${CYPRESS_REPORT}" &>/dev/null; then - break; + break fi done popd || exit 1 # Without json events we have some failure - if ! grep -q '^\[.*\]$' < "${CYPRESS_REPORT}"; then + if ! grep -q '^\[.*\]$' <"${CYPRESS_REPORT}"; then cat "${CYPRESS_REPORT}" >&2 exit 1 fi # Filter json events - grep '^\[.*\]$' < "${CYPRESS_REPORT}" > "${CYPRESS_REPORT}.tmp" + grep '^\[.*\]$' <"${CYPRESS_REPORT}" >"${CYPRESS_REPORT}.tmp" mv "${CYPRESS_REPORT}.tmp" "${CYPRESS_REPORT}" # Check for any auto-generated error diff --git a/tests/common/bats/gpg.bash b/tests/common/bats/gpg.bash index a0e4e52cf9..f49d097b00 100644 --- a/tests/common/bats/gpg.bash +++ b/tests/common/bats/gpg.bash @@ -26,7 +26,7 @@ gpg.setup() { for n in $(seq 3); do # Retry as gpg-agent might not reliably start if gpg.auto_generate_key; then - break; + break fi echo "failed to generate gpg key try ${n}" >&2 n="0" diff --git a/tests/common/bats/harbor.bash b/tests/common/bats/harbor.bash index 5f921f0713..152c57e309 100644 --- a/tests/common/bats/harbor.bash +++ b/tests/common/bats/harbor.bash @@ -67,13 +67,13 @@ harbor.setup_project() { harbor.create_project "${harbor_project}" output="$(harbor.create_robot "${harbor_project}" "${harbor_robot}")" - jq -r .id <<< "${output}" > "${harbor_robot_id_path}" - jq -r .secret <<< "${output}" > "${harbor_robot_secret_path}" + jq -r .id <<<"${output}" >"${harbor_robot_id_path}" + jq -r .secret <<<"${output}" >"${harbor_robot_secret_path}" if [[ "${harbor_secure}" != "true" ]]; then - ctr.insecure login --username 
"${harbor_robot_fullname}" --password-stdin "${harbor_endpoint}" < "${harbor_robot_secret_path}" + ctr.insecure login --username "${harbor_robot_fullname}" --password-stdin "${harbor_endpoint}" <"${harbor_robot_secret_path}" else - ctr login --username "${harbor_robot_fullname}" --password-stdin "${harbor_endpoint}" < "${harbor_robot_secret_path}" + ctr login --username "${harbor_robot_fullname}" --password-stdin "${harbor_endpoint}" <"${harbor_robot_secret_path}" fi } @@ -123,7 +123,6 @@ harbor.post() { log.fatal "usage: harbor.post [resource/path] [json data] " fi - if [[ "${harbor_secure}" == "true" ]]; then curl -s -u "${harbor_username}:${harbor_password}" "https://${harbor_endpoint}/api/v2.0/${1}" -H "accept: application/json" -H "content-type: application/json" -d "${2}" "${@:3}" else diff --git a/tests/common/bats/update-ips.bash b/tests/common/bats/update-ips.bash index b839e6cf96..970462b66b 100644 --- a/tests/common/bats/update-ips.bash +++ b/tests/common/bats/update-ips.bash @@ -39,22 +39,22 @@ update_ips.mock_minimal() { mock_set_output "${mock_dig}" "127.0.0.2" 2 # .networkPolicies.global.scIngress.ips mock_set_output "${mock_dig}" "127.0.0.3" 3 # .networkPolicies.global.wcIngress.ips - mock_set_output "${mock_kubectl}" "127.0.1.1 127.0.2.1 127.0.3.1" 1 # .networkPolicies.global.scApiserver.ips node internal - mock_set_output "${mock_kubectl}" "127.0.1.2 127.0.2.2 127.0.3.2" 2 # .networkPolicies.global.scApiserver.ips calico ipip - mock_set_output "${mock_kubectl}" "127.0.1.21 127.0.2.21 127.0.3.21" 3 # .networkPolicies.global.scApiserver.ips calico vxlan - mock_set_output "${mock_kubectl}" "127.0.1.3 127.0.2.3 127.0.3.3" 4 # .networkPolicies.global.scApiserver.ips calico wireguard - mock_set_output "${mock_kubectl}" "127.0.1.7 127.0.2.7 127.0.3.7" 5 # .networkPolicies.global.scNodes.ips node internal - mock_set_output "${mock_kubectl}" "127.0.1.8 127.0.2.8 127.0.3.8" 6 # .networkPolicies.global.scNodes.ips calico ipip - mock_set_output 
"${mock_kubectl}" "127.0.1.81 127.0.2.81 127.0.3.81" 7 # .networkPolicies.global.scNodes.ips calico vxlan - mock_set_output "${mock_kubectl}" "127.0.1.9 127.0.2.9 127.0.3.9" 8 # .networkPolicies.global.scNodes.ips calico wireguard - mock_set_output "${mock_kubectl}" "127.0.1.4 127.0.2.4 127.0.3.4" 9 # .networkPolicies.global.wcApiserver.ips node internal - mock_set_output "${mock_kubectl}" "127.0.1.5 127.0.2.5 127.0.3.5" 10 # .networkPolicies.global.wcApiserver.ips calico ipip - mock_set_output "${mock_kubectl}" "127.0.1.51 127.0.2.51 127.0.3.51" 11 # .networkPolicies.global.wcApiserver.ips calico vxlan - mock_set_output "${mock_kubectl}" "127.0.1.6 127.0.2.6 127.0.3.6" 12 # .networkPolicies.global.wcApiserver.ips calico wireguard - mock_set_output "${mock_kubectl}" "127.0.1.10 127.0.2.10 127.0.3.10" 13 # .networkPolicies.global.wcNodes.ips node internal - mock_set_output "${mock_kubectl}" "127.0.1.11 127.0.2.11 127.0.3.11" 14 # .networkPolicies.global.wcNodes.ips calico ipip + mock_set_output "${mock_kubectl}" "127.0.1.1 127.0.2.1 127.0.3.1" 1 # .networkPolicies.global.scApiserver.ips node internal + mock_set_output "${mock_kubectl}" "127.0.1.2 127.0.2.2 127.0.3.2" 2 # .networkPolicies.global.scApiserver.ips calico ipip + mock_set_output "${mock_kubectl}" "127.0.1.21 127.0.2.21 127.0.3.21" 3 # .networkPolicies.global.scApiserver.ips calico vxlan + mock_set_output "${mock_kubectl}" "127.0.1.3 127.0.2.3 127.0.3.3" 4 # .networkPolicies.global.scApiserver.ips calico wireguard + mock_set_output "${mock_kubectl}" "127.0.1.7 127.0.2.7 127.0.3.7" 5 # .networkPolicies.global.scNodes.ips node internal + mock_set_output "${mock_kubectl}" "127.0.1.8 127.0.2.8 127.0.3.8" 6 # .networkPolicies.global.scNodes.ips calico ipip + mock_set_output "${mock_kubectl}" "127.0.1.81 127.0.2.81 127.0.3.81" 7 # .networkPolicies.global.scNodes.ips calico vxlan + mock_set_output "${mock_kubectl}" "127.0.1.9 127.0.2.9 127.0.3.9" 8 # .networkPolicies.global.scNodes.ips calico wireguard + 
mock_set_output "${mock_kubectl}" "127.0.1.4 127.0.2.4 127.0.3.4" 9 # .networkPolicies.global.wcApiserver.ips node internal + mock_set_output "${mock_kubectl}" "127.0.1.5 127.0.2.5 127.0.3.5" 10 # .networkPolicies.global.wcApiserver.ips calico ipip + mock_set_output "${mock_kubectl}" "127.0.1.51 127.0.2.51 127.0.3.51" 11 # .networkPolicies.global.wcApiserver.ips calico vxlan + mock_set_output "${mock_kubectl}" "127.0.1.6 127.0.2.6 127.0.3.6" 12 # .networkPolicies.global.wcApiserver.ips calico wireguard + mock_set_output "${mock_kubectl}" "127.0.1.10 127.0.2.10 127.0.3.10" 13 # .networkPolicies.global.wcNodes.ips node internal + mock_set_output "${mock_kubectl}" "127.0.1.11 127.0.2.11 127.0.3.11" 14 # .networkPolicies.global.wcNodes.ips calico ipip mock_set_output "${mock_kubectl}" "127.0.1.111 127.0.2.111 127.0.3.111" 15 # .networkPolicies.global.wcNodes.ips calico vxlan - mock_set_output "${mock_kubectl}" "127.0.1.12 127.0.2.12 127.0.3.12" 16 # .networkPolicies.global.wcNodes.ips calico wireguard + mock_set_output "${mock_kubectl}" "127.0.1.12 127.0.2.12 127.0.3.12" 16 # .networkPolicies.global.wcNodes.ips calico wireguard } update_ips.mock_maximal() { @@ -279,7 +279,7 @@ update_ips.assert_rclone_swift() { vary: Accept-Encoding\r \r {"token":{"catalog":[{"type": "object-store", "name": "swift", "endpoints": [{"interface":"public", "region": "swift-region", "url": "https://swift.foo.dev-ck8s.com"}]}]}}' 1 - mock_set_output "${mock_curl}" "" 2 # DELETE /auth/tokens + mock_set_output "${mock_curl}" "" 2 # DELETE /auth/tokens mock_set_output "${mock_dig}" "127.0.0.4" 4 # keystone endpoint mock_set_output "${mock_dig}" "127.0.0.5" 5 # swift endpoint } diff --git a/tests/common/bats/yq.bash b/tests/common/bats/yq.bash index ecd46358cd..f659c00d97 100644 --- a/tests/common/bats/yq.bash +++ b/tests/common/bats/yq.bash @@ -4,7 +4,7 @@ # conditionally run yq4 or yq depending on how it is installed yq() { - if command -v yq4 > /dev/null; then + if command -v yq4 >/dev/null; 
then command yq4 "${@}" else command yq "${@}" diff --git a/tests/common/exec.bash b/tests/common/exec.bash index 3fb6a87c0a..cb3ea106d6 100755 --- a/tests/common/exec.bash +++ b/tests/common/exec.bash @@ -175,7 +175,7 @@ suite.setup() { declare postvariables postvariables="$(env)" - sort <<< "$prevariables"$'\n'"$postvariables" | uniq -u > "suite.env" + sort <<<"$prevariables"$'\n'"$postvariables" | uniq -u >"suite.env" echo "info: environment set during setup: suite.env" echo "- make sure to export these variables before running the test suite and teardown suite!" diff --git a/tests/common/gen.bash b/tests/common/gen.bash index 98a4034f2f..3cbaa138c1 100755 --- a/tests/common/gen.bash +++ b/tests/common/gen.bash @@ -13,7 +13,7 @@ cypress_gen() { local -a input - readarray -t input < "${file}" + readarray -t input <"${file}" local bats cluster helmfile describe test @@ -31,9 +31,9 @@ cypress_gen() { elif [[ "${line}" =~ "// teardown_file" ]]; then teardown_file+=("${line##// teardown_file }") elif [[ "${line}" =~ [[:space:]]*describe\( ]]; then - describe="$(sed -n "s/describe([\"\']\(.\+\)[\"\'],.\+/\1/p" <<< "${line}")" + describe="$(sed -n "s/describe([\"\']\(.\+\)[\"\'],.\+/\1/p" <<<"${line}")" elif [[ "${line}" =~ [[:space:]]+it\( ]]; then - test="$(sed -n "s/[[:space:]]\+it([\"\']\(.\+\)[\"\'],.\+/\1/p" <<< "${line}")" + test="$(sed -n "s/[[:space:]]\+it([\"\']\(.\+\)[\"\'],.\+/\1/p" <<<"${line}")" its+=("${describe} ${test}") fi @@ -96,7 +96,7 @@ cypress() { local -a files if [[ "$#" -eq 0 ]]; then - readarray -t files <<< "$(find "${tests}" -type f -name '*.cy.js')" + readarray -t files <<<"$(find "${tests}" -type f -name '*.cy.js')" else files=("${@/#/"$(dirname "${tests}")/"}") fi @@ -104,7 +104,7 @@ cypress() { for file in "${files[@]}"; do echo "- ${file##"${root}/"}" - cypress_gen "${file}" > "${file/%.cy.js/.gen.bats}" + cypress_gen "${file}" >"${file/%.cy.js/.gen.bats}" done } @@ -114,7 +114,7 @@ template() { local -a files if [[ "$#" -eq 0 ]]; then 
- readarray -t files <<< "$(find "${tests}" -type f -name '*.bats.gotmpl')" + readarray -t files <<<"$(find "${tests}" -type f -name '*.bats.gotmpl')" else files=("${@/#/"$(dirname "${tests}")/"}") fi @@ -128,10 +128,10 @@ template() { args+=("--file=${file}" "--out=${file/%.bats.gotmpl/.gen.bats}") done - if command -v gomplate > /dev/null; then - pushd "${root}" &> /dev/null + if command -v gomplate >/dev/null; then + pushd "${root}" &>/dev/null gomplate "${args[@]}" - popd &> /dev/null + popd &>/dev/null else "${root}/scripts/run-from-container.sh" "docker.io/hairyhenderson/gomplate:v3.11.7-alpine" "${args[@]}" fi diff --git a/tests/end-to-end/harbor/use-api.bats b/tests/end-to-end/harbor/use-api.bats index 700c4908d4..3998c7247c 100644 --- a/tests/end-to-end/harbor/use-api.bats +++ b/tests/end-to-end/harbor/use-api.bats @@ -68,12 +68,12 @@ teardown_file() { assert_line --regexp ".*\"name\":\"robot.*" assert_success - jq -r .id <<< "${output}" > "${harbor_robot_id_path}" - jq -r .secret <<< "${output}" > "${harbor_robot_secret_path}" + jq -r .id <<<"${output}" >"${harbor_robot_id_path}" + jq -r .secret <<<"${output}" >"${harbor_robot_secret_path}" } @test "harbor api can authenticate with robot account" { - run skopeo login "${harbor_endpoint}" --username "${harbor_robot_fullname}" --password-stdin < "${harbor_robot_secret_path}" + run skopeo login "${harbor_endpoint}" --username "${harbor_robot_fullname}" --password-stdin <"${harbor_robot_secret_path}" assert_line --regexp "Login Succeeded" assert_success @@ -121,7 +121,7 @@ teardown_file() { } @test "harbor api can delete robot account" { - read -r harbor_robot_id < "${harbor_robot_id_path}" + read -r harbor_robot_id <"${harbor_robot_id_path}" run harbor.delete_robot "${harbor_robot_id}" refute_output diff --git a/tests/end-to-end/opa-gatekeeper/policies.bats b/tests/end-to-end/opa-gatekeeper/policies.bats index 5ae7024d74..c01b0b8e99 100644 --- a/tests/end-to-end/opa-gatekeeper/policies.bats +++ 
b/tests/end-to-end/opa-gatekeeper/policies.bats @@ -28,7 +28,7 @@ setup_file() { with_kubeconfig wc - if ! kubectl get ns staging &> /dev/null || [[ "$(kubectl get ns staging '-ojsonpath={.metadata.labels.owner}')" == "operator" ]]; then + if ! kubectl get ns staging &>/dev/null || [[ "$(kubectl get ns staging '-ojsonpath={.metadata.labels.owner}')" == "operator" ]]; then fail "these tests requires that you have a 'staging' user namespace" fi diff --git a/tests/end-to-end/velero/user-app.bats b/tests/end-to-end/velero/user-app.bats index e3ebb4a5ed..b58f0c3ed3 100644 --- a/tests/end-to-end/velero/user-app.bats +++ b/tests/end-to-end/velero/user-app.bats @@ -13,7 +13,7 @@ delete_test_namespace() { } create_test_application() { - image="${1}" envsubst < "${BATS_TEST_DIRNAME}/resources/test-application.yaml" | kubectl apply -f - --wait + image="${1}" envsubst <"${BATS_TEST_DIRNAME}/resources/test-application.yaml" | kubectl apply -f - --wait } wait_test_application() { diff --git a/tests/integration/harbor/use-api.bats b/tests/integration/harbor/use-api.bats index 31fa379d44..69891b4d15 100644 --- a/tests/integration/harbor/use-api.bats +++ b/tests/integration/harbor/use-api.bats @@ -71,12 +71,12 @@ teardown_file() { assert_line --regexp ".*\"name\":\"robot.*" assert_success - jq -r .id <<< "${output}" > "${harbor_robot_id_path}" - jq -r .secret <<< "${output}" > "${harbor_robot_secret_path}" + jq -r .id <<<"${output}" >"${harbor_robot_id_path}" + jq -r .secret <<<"${output}" >"${harbor_robot_secret_path}" } @test "harbor api can authenticate with robot account" { - run skopeo login --tls-verify=false "${harbor_endpoint}" --username "${harbor_robot_fullname}" --password-stdin < "${harbor_robot_secret_path}" + run skopeo login --tls-verify=false "${harbor_endpoint}" --username "${harbor_robot_fullname}" --password-stdin <"${harbor_robot_secret_path}" assert_line --regexp "Login Succeeded" assert_success @@ -125,7 +125,7 @@ teardown_file() { } @test "harbor api can 
delete robot account" { - read -r harbor_robot_id < "${harbor_robot_id_path}" + read -r harbor_robot_id <"${harbor_robot_id_path}" run harbor.delete_robot "${harbor_robot_id}" refute_output diff --git a/tests/unit/aws/validate-capi-air-gapped.gen.bats b/tests/unit/aws/validate-capi-air-gapped.gen.bats index d01bce421a..7bbc30d91a 100644 --- a/tests/unit/aws/validate-capi-air-gapped.gen.bats +++ b/tests/unit/aws/validate-capi-air-gapped.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - aws:capi:air-gapped - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - aws:capi:air-gapped - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - aws:capi:air-gapped - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - aws:capi:air-gapped - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/aws/validate-capi-dev.gen.bats b/tests/unit/aws/validate-capi-dev.gen.bats index 12bd3e27fb..2fd9655fda 100644 --- a/tests/unit/aws/validate-capi-dev.gen.bats +++ b/tests/unit/aws/validate-capi-dev.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - aws:capi:dev - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - aws:capi:dev - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - aws:capi:dev - service cluster" { run yq.set 'sc' 
'.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - aws:capi:dev - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/aws/validate-capi-prod.gen.bats b/tests/unit/aws/validate-capi-prod.gen.bats index c801ad8fdd..003a4d7cec 100644 --- a/tests/unit/aws/validate-capi-prod.gen.bats +++ b/tests/unit/aws/validate-capi-prod.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - aws:capi:prod - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - aws:capi:prod - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - aws:capi:prod - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - aws:capi:prod - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/aws/validate-kubespray-air-gapped.gen.bats b/tests/unit/aws/validate-kubespray-air-gapped.gen.bats index d23b0deff0..75b723cab3 100644 --- a/tests/unit/aws/validate-kubespray-air-gapped.gen.bats +++ b/tests/unit/aws/validate-kubespray-air-gapped.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - aws:kubespray:air-gapped - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' 
assert_success } @test "configuration is valid - aws:kubespray:air-gapped - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - aws:kubespray:air-gapped - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - aws:kubespray:air-gapped - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/aws/validate-kubespray-dev.gen.bats b/tests/unit/aws/validate-kubespray-dev.gen.bats index 8b40320701..02012e64a4 100644 --- a/tests/unit/aws/validate-kubespray-dev.gen.bats +++ b/tests/unit/aws/validate-kubespray-dev.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - aws:kubespray:dev - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - aws:kubespray:dev - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - aws:kubespray:dev - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - aws:kubespray:dev - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/aws/validate-kubespray-prod.gen.bats b/tests/unit/aws/validate-kubespray-prod.gen.bats index 210809ee45..f83a0e7d74 100644 --- 
a/tests/unit/aws/validate-kubespray-prod.gen.bats +++ b/tests/unit/aws/validate-kubespray-prod.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - aws:kubespray:prod - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - aws:kubespray:prod - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - aws:kubespray:prod - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - aws:kubespray:prod - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/azure/validate-capi-air-gapped.gen.bats b/tests/unit/azure/validate-capi-air-gapped.gen.bats index 84c893514d..f4343b533c 100644 --- a/tests/unit/azure/validate-capi-air-gapped.gen.bats +++ b/tests/unit/azure/validate-capi-air-gapped.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - azure:capi:air-gapped - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - azure:capi:air-gapped - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - azure:capi:air-gapped - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - azure:capi:air-gapped - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not 
a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/azure/validate-capi-dev.gen.bats b/tests/unit/azure/validate-capi-dev.gen.bats index a5841dfdf0..c871e5b128 100644 --- a/tests/unit/azure/validate-capi-dev.gen.bats +++ b/tests/unit/azure/validate-capi-dev.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - azure:capi:dev - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - azure:capi:dev - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - azure:capi:dev - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - azure:capi:dev - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/azure/validate-capi-prod.gen.bats b/tests/unit/azure/validate-capi-prod.gen.bats index 91e35cf798..c5809df7b4 100644 --- a/tests/unit/azure/validate-capi-prod.gen.bats +++ b/tests/unit/azure/validate-capi-prod.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - azure:capi:prod - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - azure:capi:prod - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - azure:capi:prod - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate 
sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - azure:capi:prod - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/azure/validate-kubespray-air-gapped.gen.bats b/tests/unit/azure/validate-kubespray-air-gapped.gen.bats index 793baa71b1..cb6be58bca 100644 --- a/tests/unit/azure/validate-kubespray-air-gapped.gen.bats +++ b/tests/unit/azure/validate-kubespray-air-gapped.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - azure:kubespray:air-gapped - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - azure:kubespray:air-gapped - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - azure:kubespray:air-gapped - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - azure:kubespray:air-gapped - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/azure/validate-kubespray-dev.gen.bats b/tests/unit/azure/validate-kubespray-dev.gen.bats index f515f45c47..53be20f150 100644 --- a/tests/unit/azure/validate-kubespray-dev.gen.bats +++ b/tests/unit/azure/validate-kubespray-dev.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - azure:kubespray:dev - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' 
assert_success } @test "configuration is valid - azure:kubespray:dev - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - azure:kubespray:dev - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - azure:kubespray:dev - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/azure/validate-kubespray-prod.gen.bats b/tests/unit/azure/validate-kubespray-prod.gen.bats index 9c056f4f5f..77afd25347 100644 --- a/tests/unit/azure/validate-kubespray-prod.gen.bats +++ b/tests/unit/azure/validate-kubespray-prod.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - azure:kubespray:prod - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - azure:kubespray:prod - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - azure:kubespray:prod - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - azure:kubespray:prod - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/baremetal/validate-capi-air-gapped.gen.bats b/tests/unit/baremetal/validate-capi-air-gapped.gen.bats index 
5c8d4e7b95..d74996ca82 100644 --- a/tests/unit/baremetal/validate-capi-air-gapped.gen.bats +++ b/tests/unit/baremetal/validate-capi-air-gapped.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - baremetal:capi:air-gapped - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - baremetal:capi:air-gapped - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - baremetal:capi:air-gapped - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - baremetal:capi:air-gapped - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/baremetal/validate-capi-dev.gen.bats b/tests/unit/baremetal/validate-capi-dev.gen.bats index cd19f06389..7e836f6672 100644 --- a/tests/unit/baremetal/validate-capi-dev.gen.bats +++ b/tests/unit/baremetal/validate-capi-dev.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - baremetal:capi:dev - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - baremetal:capi:dev - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - baremetal:capi:dev - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - baremetal:capi:dev - workload cluster" 
{ run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/baremetal/validate-capi-prod.gen.bats b/tests/unit/baremetal/validate-capi-prod.gen.bats index 63cdb4016f..db7f7dd4e1 100644 --- a/tests/unit/baremetal/validate-capi-prod.gen.bats +++ b/tests/unit/baremetal/validate-capi-prod.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - baremetal:capi:prod - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - baremetal:capi:prod - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - baremetal:capi:prod - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - baremetal:capi:prod - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/baremetal/validate-kubespray-air-gapped.gen.bats b/tests/unit/baremetal/validate-kubespray-air-gapped.gen.bats index 1d11102437..f821d20d24 100644 --- a/tests/unit/baremetal/validate-kubespray-air-gapped.gen.bats +++ b/tests/unit/baremetal/validate-kubespray-air-gapped.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - baremetal:kubespray:air-gapped - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - baremetal:kubespray:air-gapped - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc 
<<<$'y\n' assert_success } @test "configuration is invalid - baremetal:kubespray:air-gapped - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - baremetal:kubespray:air-gapped - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/baremetal/validate-kubespray-dev.gen.bats b/tests/unit/baremetal/validate-kubespray-dev.gen.bats index f0be449f98..0efd321ac9 100644 --- a/tests/unit/baremetal/validate-kubespray-dev.gen.bats +++ b/tests/unit/baremetal/validate-kubespray-dev.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - baremetal:kubespray:dev - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - baremetal:kubespray:dev - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - baremetal:kubespray:dev - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - baremetal:kubespray:dev - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/baremetal/validate-kubespray-prod.gen.bats b/tests/unit/baremetal/validate-kubespray-prod.gen.bats index 1e796adc60..c06d597e10 100644 --- a/tests/unit/baremetal/validate-kubespray-prod.gen.bats +++ 
b/tests/unit/baremetal/validate-kubespray-prod.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - baremetal:kubespray:prod - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - baremetal:kubespray:prod - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - baremetal:kubespray:prod - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - baremetal:kubespray:prod - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/elastx/validate-capi-air-gapped.gen.bats b/tests/unit/elastx/validate-capi-air-gapped.gen.bats index 3a90d47b9b..df8a866f95 100644 --- a/tests/unit/elastx/validate-capi-air-gapped.gen.bats +++ b/tests/unit/elastx/validate-capi-air-gapped.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - elastx:capi:air-gapped - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - elastx:capi:air-gapped - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - elastx:capi:air-gapped - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - elastx:capi:air-gapped - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid 
hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/elastx/validate-capi-dev.gen.bats b/tests/unit/elastx/validate-capi-dev.gen.bats index 3af59093c6..861f29ab11 100644 --- a/tests/unit/elastx/validate-capi-dev.gen.bats +++ b/tests/unit/elastx/validate-capi-dev.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - elastx:capi:dev - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - elastx:capi:dev - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - elastx:capi:dev - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - elastx:capi:dev - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/elastx/validate-capi-prod.gen.bats b/tests/unit/elastx/validate-capi-prod.gen.bats index 46cb383bf5..9332ec6fd7 100644 --- a/tests/unit/elastx/validate-capi-prod.gen.bats +++ b/tests/unit/elastx/validate-capi-prod.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - elastx:capi:prod - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - elastx:capi:prod - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - elastx:capi:prod - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s 
validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - elastx:capi:prod - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/elastx/validate-kubespray-air-gapped.gen.bats b/tests/unit/elastx/validate-kubespray-air-gapped.gen.bats index df7053faf2..75c707d145 100644 --- a/tests/unit/elastx/validate-kubespray-air-gapped.gen.bats +++ b/tests/unit/elastx/validate-kubespray-air-gapped.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - elastx:kubespray:air-gapped - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - elastx:kubespray:air-gapped - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - elastx:kubespray:air-gapped - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - elastx:kubespray:air-gapped - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/elastx/validate-kubespray-dev.gen.bats b/tests/unit/elastx/validate-kubespray-dev.gen.bats index 2d51126f01..6384dc2ed9 100644 --- a/tests/unit/elastx/validate-kubespray-dev.gen.bats +++ b/tests/unit/elastx/validate-kubespray-dev.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - elastx:kubespray:dev - service cluster" { - run ck8s validate sc <<< $'y\n' + run 
ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - elastx:kubespray:dev - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - elastx:kubespray:dev - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - elastx:kubespray:dev - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/elastx/validate-kubespray-prod.gen.bats b/tests/unit/elastx/validate-kubespray-prod.gen.bats index 45e9ccbec6..38d13c0fef 100644 --- a/tests/unit/elastx/validate-kubespray-prod.gen.bats +++ b/tests/unit/elastx/validate-kubespray-prod.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - elastx:kubespray:prod - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - elastx:kubespray:prod - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - elastx:kubespray:prod - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - elastx:kubespray:prod - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/general/bin-conditional-set-me.bats 
b/tests/unit/general/bin-conditional-set-me.bats index 0b23b7ad76..6c1b01696e 100644 --- a/tests/unit/general/bin-conditional-set-me.bats +++ b/tests/unit/general/bin-conditional-set-me.bats @@ -488,7 +488,6 @@ _refute_condition_and_warn() { run _apply_normalise_sc _refute_condition_and_warn .\"networkPolicies\".\"rclone\".\"sync\".\"objectStorageSwift\".\"ips\" - yq.set sc .objectStorage.sync.enabled 'false' yq.set sc .thanos.objectStorage.type \"swift\" yq.set sc .harbor.persistence.type \"objectStorage\" diff --git a/tests/unit/general/bin-update-ips-rclone.bats b/tests/unit/general/bin-update-ips-rclone.bats index 26760e32c3..e5e7ef5ee1 100644 --- a/tests/unit/general/bin-update-ips-rclone.bats +++ b/tests/unit/general/bin-update-ips-rclone.bats @@ -260,7 +260,6 @@ _test_apply_rclone_sync_swift_remove_s3() { _test_apply_rclone_sync_swift_remove_s3 '.objectStorage.sync.destinationType' } - # --- swift add s3 ----------------------------------------------------------------------------------------------------- _test_apply_rclone_sync_swift_add_s3() { diff --git a/tests/unit/general/bin-update-ips-swift.bats b/tests/unit/general/bin-update-ips-swift.bats index 2470d4d508..23f5c8c0bd 100644 --- a/tests/unit/general/bin-update-ips-swift.bats +++ b/tests/unit/general/bin-update-ips-swift.bats @@ -51,7 +51,7 @@ teardown_file() { _test_requires_auth_endpoint() { yq.set sc "${1}" '"swift"' - yq.set sc .objectStorage.swift.authUrl '""' + yq.set sc .objectStorage.swift.authUrl '""' run ck8s update-ips both apply assert_failure diff --git a/tests/unit/general/bin-update-ips.bats b/tests/unit/general/bin-update-ips.bats index fa5a59d2a2..99cf495dc5 100644 --- a/tests/unit/general/bin-update-ips.bats +++ b/tests/unit/general/bin-update-ips.bats @@ -205,7 +205,7 @@ _configure_maximal() { _configure_maximal if [[ "${CK8S_TESTS_REGENERATE_RESOURCES:-}" == "true" ]]; then - _apply_normalise > "${BATS_TEST_DIRNAME}/resources/maximal-run-full-diff.out" + _apply_normalise 
>"${BATS_TEST_DIRNAME}/resources/maximal-run-full-diff.out" return fi diff --git a/tests/unit/general/schema.bats b/tests/unit/general/schema.bats index 5e0a6c8331..059c6d78e1 100644 --- a/tests/unit/general/schema.bats +++ b/tests/unit/general/schema.bats @@ -10,7 +10,7 @@ setup() { } find_schemas() { - readarray -t schemas <<< "$(find "${ROOT}/config/schemas/" -type f -name '*.yaml')" + readarray -t schemas <<<"$(find "${ROOT}/config/schemas/" -type f -name '*.yaml')" } @test "root schemas should have titles" { diff --git a/tests/unit/openstack/validate-capi-air-gapped.gen.bats b/tests/unit/openstack/validate-capi-air-gapped.gen.bats index 49deff332a..e5e68b3797 100644 --- a/tests/unit/openstack/validate-capi-air-gapped.gen.bats +++ b/tests/unit/openstack/validate-capi-air-gapped.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - openstack:capi:air-gapped - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - openstack:capi:air-gapped - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - openstack:capi:air-gapped - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - openstack:capi:air-gapped - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/openstack/validate-capi-dev.gen.bats b/tests/unit/openstack/validate-capi-dev.gen.bats index f155577113..6483f575a9 100644 --- a/tests/unit/openstack/validate-capi-dev.gen.bats +++ b/tests/unit/openstack/validate-capi-dev.gen.bats @@ -36,27 +36,27 @@ 
teardown_file() { } @test "configuration is valid - openstack:capi:dev - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - openstack:capi:dev - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - openstack:capi:dev - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - openstack:capi:dev - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/openstack/validate-capi-prod.gen.bats b/tests/unit/openstack/validate-capi-prod.gen.bats index 1b618f9537..7c11e0e368 100644 --- a/tests/unit/openstack/validate-capi-prod.gen.bats +++ b/tests/unit/openstack/validate-capi-prod.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - openstack:capi:prod - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - openstack:capi:prod - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - openstack:capi:prod - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - openstack:capi:prod - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid 
hostname"' } diff --git a/tests/unit/openstack/validate-kubespray-air-gapped.gen.bats b/tests/unit/openstack/validate-kubespray-air-gapped.gen.bats index 4d009da958..9e813bf9a3 100644 --- a/tests/unit/openstack/validate-kubespray-air-gapped.gen.bats +++ b/tests/unit/openstack/validate-kubespray-air-gapped.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - openstack:kubespray:air-gapped - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - openstack:kubespray:air-gapped - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - openstack:kubespray:air-gapped - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - openstack:kubespray:air-gapped - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/openstack/validate-kubespray-dev.gen.bats b/tests/unit/openstack/validate-kubespray-dev.gen.bats index 674cfc5337..fd8d174a38 100644 --- a/tests/unit/openstack/validate-kubespray-dev.gen.bats +++ b/tests/unit/openstack/validate-kubespray-dev.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - openstack:kubespray:dev - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - openstack:kubespray:dev - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - openstack:kubespray:dev - service cluster" { run yq.set 'sc' '.global.baseDomain' 
'"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - openstack:kubespray:dev - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/openstack/validate-kubespray-prod.gen.bats b/tests/unit/openstack/validate-kubespray-prod.gen.bats index ad9dbb7d30..11d6a6273a 100644 --- a/tests/unit/openstack/validate-kubespray-prod.gen.bats +++ b/tests/unit/openstack/validate-kubespray-prod.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - openstack:kubespray:prod - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - openstack:kubespray:prod - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - openstack:kubespray:prod - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - openstack:kubespray:prod - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/safespring/validate-capi-air-gapped.gen.bats b/tests/unit/safespring/validate-capi-air-gapped.gen.bats index d5025d580f..27bb8ec85f 100644 --- a/tests/unit/safespring/validate-capi-air-gapped.gen.bats +++ b/tests/unit/safespring/validate-capi-air-gapped.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - safespring:capi:air-gapped - 
service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - safespring:capi:air-gapped - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - safespring:capi:air-gapped - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - safespring:capi:air-gapped - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/safespring/validate-capi-dev.gen.bats b/tests/unit/safespring/validate-capi-dev.gen.bats index fb205e1c84..2fe1ab49f2 100644 --- a/tests/unit/safespring/validate-capi-dev.gen.bats +++ b/tests/unit/safespring/validate-capi-dev.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - safespring:capi:dev - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - safespring:capi:dev - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - safespring:capi:dev - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - safespring:capi:dev - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git 
a/tests/unit/safespring/validate-capi-prod.gen.bats b/tests/unit/safespring/validate-capi-prod.gen.bats index 4c29146998..55665c8451 100644 --- a/tests/unit/safespring/validate-capi-prod.gen.bats +++ b/tests/unit/safespring/validate-capi-prod.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - safespring:capi:prod - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - safespring:capi:prod - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - safespring:capi:prod - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - safespring:capi:prod - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/safespring/validate-kubespray-air-gapped.gen.bats b/tests/unit/safespring/validate-kubespray-air-gapped.gen.bats index 82320051c6..74949344a7 100644 --- a/tests/unit/safespring/validate-kubespray-air-gapped.gen.bats +++ b/tests/unit/safespring/validate-kubespray-air-gapped.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - safespring:kubespray:air-gapped - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - safespring:kubespray:air-gapped - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - safespring:kubespray:air-gapped - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s 
validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - safespring:kubespray:air-gapped - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/safespring/validate-kubespray-dev.gen.bats b/tests/unit/safespring/validate-kubespray-dev.gen.bats index 164a92f5c9..cedec10d6b 100644 --- a/tests/unit/safespring/validate-kubespray-dev.gen.bats +++ b/tests/unit/safespring/validate-kubespray-dev.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - safespring:kubespray:dev - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - safespring:kubespray:dev - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - safespring:kubespray:dev - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - safespring:kubespray:dev - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/safespring/validate-kubespray-prod.gen.bats b/tests/unit/safespring/validate-kubespray-prod.gen.bats index aaa07b7cc4..c72a281b27 100644 --- a/tests/unit/safespring/validate-kubespray-prod.gen.bats +++ b/tests/unit/safespring/validate-kubespray-prod.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - safespring:kubespray:prod - service cluster" { - run ck8s validate sc <<< 
$'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - safespring:kubespray:prod - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - safespring:kubespray:prod - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - safespring:kubespray:prod - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/templates/releases.bash b/tests/unit/templates/releases.bash index 50d8ef2dc4..e3d4cfbcc9 100644 --- a/tests/unit/templates/releases.bash +++ b/tests/unit/templates/releases.bash @@ -8,7 +8,6 @@ helmfile_build_releases() { local releases expression releases="${CK8S_CONFIG_PATH}/pre-build/${1}.yaml" - if ! 
[[ -f "${releases}" ]]; then mkdir -p "$(dirname "${releases}")" @@ -31,10 +30,10 @@ helmfile_build_releases() { case "${1:-}" in sc) - helmfile -e service_cluster -f "${ROOT}/helmfile.d" -q build | yq -oj -I0 "${expression}" > "${target}" + helmfile -e service_cluster -f "${ROOT}/helmfile.d" -q build | yq -oj -I0 "${expression}" >"${target}" ;; wc) - helmfile -e workload_cluster -f "${ROOT}/helmfile.d" -q build | yq -oj -I0 "${expression}" > "${target}" + helmfile -e workload_cluster -f "${ROOT}/helmfile.d" -q build | yq -oj -I0 "${expression}" >"${target}" ;; *) echo "error: usage: helmfile_build_releases " @@ -68,7 +67,7 @@ helmfile_build_needs() { .[] | select( .selector | match(\"${incoming// /|}\") ) | .needs + [.selector] | .[] - ] | sort | unique | join(\" \") " <<< "${releases}")" + ] | sort | unique | join(\" \") " <<<"${releases}")" done echo "${outgoing// /$'\n'}" @@ -107,7 +106,7 @@ helmfile_template_release() { namespace="${namespace%%,name=*}" local -a files - readarray -t files <<< "$(find "${release}" -type f -name '*.yaml')" + readarray -t files <<<"$(find "${release}" -type f -name '*.yaml')" for file in "${files[@]}"; do yq ".metadata.namespace = (.metadata.namespace // \"${namespace}\")" "${file}" @@ -123,7 +122,7 @@ helmfile_template_release() { # caches the results helmfile_template_release_needs() { local -a selectors - readarray -t selectors <<< "$(helmfile_build_needs "${1}" "${2}")" + readarray -t selectors <<<"$(helmfile_build_needs "${1}" "${2}")" local selector for selector in "${selectors[@]}"; do @@ -143,7 +142,7 @@ releases_have_through_needs() { created_expression="${3}" local -a selectors - readarray -t selectors <<< "$(helmfile_build_releases "${cluster}" | yq -r -oj -I0 '.[] | "namespace=" + .namespace + ",name=" + .name')" + readarray -t selectors <<<"$(helmfile_build_releases "${cluster}" | yq -r -oj -I0 '.[] | "namespace=" + .namespace + ",name=" + .name')" local fail=false @@ -157,7 +156,7 @@ releases_have_through_needs() { 
local uniques # print used resources once and created resources twice, then filter on totally unique identifiers - uniques="$(tr ' ' '\n' <<< "${used_resources} ${created_resources} ${created_resources}" | sort | uniq -u | tr '\n' ' ')" + uniques="$(tr ' ' '\n' <<<"${used_resources} ${created_resources} ${created_resources}" | sort | uniq -u | tr '\n' ' ')" # if we have a totally unique identifier then we have an issue if [[ -n "${uniques%% }" ]]; then @@ -173,7 +172,7 @@ releases_have_through_needs() { release_with_custom_resources_have_validation_on_install_disabled() { local -a selectors - readarray -t selectors <<< "$(helmfile_build_releases "${1}" | yq -r -oj -I0 '.[] | select(.disableValidationOnInstall != true) | "namespace=" + .namespace + ",name=" + .name')" + readarray -t selectors <<<"$(helmfile_build_releases "${1}" | yq -r -oj -I0 '.[] | select(.disableValidationOnInstall != true) | "namespace=" + .namespace + ",name=" + .name')" local fail=false diff --git a/tests/unit/templates/validate.bash b/tests/unit/templates/validate.bash index a9dc5efdd6..fbf5123b1b 100644 --- a/tests/unit/templates/validate.bash +++ b/tests/unit/templates/validate.bash @@ -3,6 +3,6 @@ helmfile_template_kubeconform() { mkdir -p /tmp/compliantkubernetes-apps-tests-unit-kubeconform - helmfile -e "${1}" -f "${ROOT}/helmfile.d/" -q template | \ + helmfile -e "${1}" -f "${ROOT}/helmfile.d/" -q template | kubeconform -cache /tmp/compliantkubernetes-apps-tests-unit-kubeconform -ignore-missing-schemas -schema-location default -schema-location 'https://raw.githubusercontent.com/datreeio/CRDs-catalog/main/{{.Group}}/{{.ResourceKind}}_{{.ResourceAPIVersion}}.json' -strict -summary - } diff --git a/tests/unit/templates/validate.bats.gotmpl b/tests/unit/templates/validate.bats.gotmpl index fa8b785e63..6ce0efc57b 100644 --- a/tests/unit/templates/validate.bats.gotmpl +++ b/tests/unit/templates/validate.bats.gotmpl @@ -39,27 +39,27 @@ teardown_file() { } @test "configuration is valid - {{ 
.provider }}:{{ .installer }}:{{ .flavor }} - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - {{ .provider }}:{{ .installer }}:{{ .flavor }} - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - {{ .provider }}:{{ .installer }}:{{ .flavor }} - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - {{ .provider }}:{{ .installer }}:{{ .flavor }} - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/upcloud/validate-capi-air-gapped.gen.bats b/tests/unit/upcloud/validate-capi-air-gapped.gen.bats index 69c726e856..beeb508204 100644 --- a/tests/unit/upcloud/validate-capi-air-gapped.gen.bats +++ b/tests/unit/upcloud/validate-capi-air-gapped.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - upcloud:capi:air-gapped - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - upcloud:capi:air-gapped - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - upcloud:capi:air-gapped - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - upcloud:capi:air-gapped - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< 
$'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/upcloud/validate-capi-dev.gen.bats b/tests/unit/upcloud/validate-capi-dev.gen.bats index 6769ab1dfc..e39d4675a2 100644 --- a/tests/unit/upcloud/validate-capi-dev.gen.bats +++ b/tests/unit/upcloud/validate-capi-dev.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - upcloud:capi:dev - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - upcloud:capi:dev - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - upcloud:capi:dev - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - upcloud:capi:dev - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/upcloud/validate-capi-prod.gen.bats b/tests/unit/upcloud/validate-capi-prod.gen.bats index b3be485977..58a52fc6cc 100644 --- a/tests/unit/upcloud/validate-capi-prod.gen.bats +++ b/tests/unit/upcloud/validate-capi-prod.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - upcloud:capi:prod - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - upcloud:capi:prod - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - upcloud:capi:prod - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + 
run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - upcloud:capi:prod - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/upcloud/validate-kubespray-air-gapped.gen.bats b/tests/unit/upcloud/validate-kubespray-air-gapped.gen.bats index 2550c35cdb..cbc3f14f7a 100644 --- a/tests/unit/upcloud/validate-kubespray-air-gapped.gen.bats +++ b/tests/unit/upcloud/validate-kubespray-air-gapped.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - upcloud:kubespray:air-gapped - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - upcloud:kubespray:air-gapped - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - upcloud:kubespray:air-gapped - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - upcloud:kubespray:air-gapped - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/upcloud/validate-kubespray-dev.gen.bats b/tests/unit/upcloud/validate-kubespray-dev.gen.bats index 66c4a7fc70..fea8878871 100644 --- a/tests/unit/upcloud/validate-kubespray-dev.gen.bats +++ b/tests/unit/upcloud/validate-kubespray-dev.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - upcloud:kubespray:dev - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate 
sc <<<$'y\n' assert_success } @test "configuration is valid - upcloud:kubespray:dev - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - upcloud:kubespray:dev - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - upcloud:kubespray:dev - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' } diff --git a/tests/unit/upcloud/validate-kubespray-prod.gen.bats b/tests/unit/upcloud/validate-kubespray-prod.gen.bats index b218fc6692..7dbd2d3a08 100644 --- a/tests/unit/upcloud/validate-kubespray-prod.gen.bats +++ b/tests/unit/upcloud/validate-kubespray-prod.gen.bats @@ -36,27 +36,27 @@ teardown_file() { } @test "configuration is valid - upcloud:kubespray:prod - service cluster" { - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_success } @test "configuration is valid - upcloud:kubespray:prod - workload cluster" { - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_success } @test "configuration is invalid - upcloud:kubespray:prod - service cluster" { run yq.set 'sc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate sc <<< $'y\n' + run ck8s validate sc <<<$'y\n' assert_output --partial 'global.baseDomain' } @test "configuration is invalid - upcloud:kubespray:prod - workload cluster" { run yq.set 'wc' '.global.baseDomain' '"this is not a valid hostname"' - run ck8s validate wc <<< $'y\n' + run ck8s validate wc <<<$'y\n' assert_output --partial '"this is not a valid hostname"' }