diff --git a/logging/bin/apiaccess-include.sh b/logging/bin/apiaccess-include.sh index 31b6645a..a0d13c4e 100644 --- a/logging/bin/apiaccess-include.sh +++ b/logging/bin/apiaccess-include.sh @@ -18,242 +18,238 @@ source bin/service-url-include.sh function stop_portforwarding { - # terminate port-forwarding process if PID was cached - - local pid - pid=${1:-$pfPID} - - if [ -o errexit ]; then - restore_errexit=Y - log_debug "Toggling errexit: Off" - set +e - fi - - if ps -p "$pid" >/dev/null; then - log_debug "Killing port-forwarding process [$pid]." - kill -9 $pid - wait $pid 2>/dev/null # suppresses message reporting process has been killed - else - log_debug "No portforwarding processID found; nothing to terminate." - fi - - if [ -n "$restore_errexit" ]; then - log_debug "Toggling errexit: On" - set -e - fi + # terminate port-forwarding process if PID was cached + + local pid + pid=${1:-$pfPID} + + if [ -o errexit ]; then + restore_errexit=Y + log_debug "Toggling errexit: Off" + set +e + fi + + if ps -p "$pid" > /dev/null; then + log_debug "Killing port-forwarding process [$pid]." + kill -9 "$pid" + wait "$pid" 2> /dev/null # suppresses message reporting process has been killed + else + log_debug "No portforwarding processID found; nothing to terminate." 
+ fi + + if [ -n "$restore_errexit" ]; then + log_debug "Toggling errexit: On" + set -e + fi } function stop_es_portforwarding { - # - # terminate ES port-forwarding process - # - # Global vars: espfpid - process id of ES portforwarding - # es_api_url - URL to access ES API/serivce - - if [ -n "$espfpid" ]; then - log_debug "ES PF PID for stopping: $espfpid" - stop_portforwarding $espfpid - unset espfpid - unset es_api_url - fi + # + # terminate ES port-forwarding process + # + # Global vars: espfpid - process id of ES portforwarding + # es_api_url - URL to access ES API/serivce + + if [ -n "$espfpid" ]; then + log_debug "ES PF PID for stopping: $espfpid" + stop_portforwarding "$espfpid" + unset espfpid + unset es_api_url + fi } function stop_kb_portforwarding { - # - # terminate KB port-forwarding process - # - # Global vars: kbpfpid - process id of KB portforwarding - # kb_api_url - URL to access KB API/serivce - - if [ -n "$kbpfpid" ]; then - log_debug "KB PF PID for stopping: $kbpfpid" - stop_portforwarding $kbpfpid - unset kbpfpid - unset kb_api_url - fi - } + # + # terminate KB port-forwarding process + # + # Global vars: kbpfpid - process id of KB portforwarding + # kb_api_url - URL to access KB API/serivce + + if [ -n "$kbpfpid" ]; then + log_debug "KB PF PID for stopping: $kbpfpid" + stop_portforwarding "$kbpfpid" + unset kbpfpid + unset kb_api_url + fi +} function get_api_url { - # - # determine URL to access specified API/service - # - # Global vars: api_url - URL to access requested API/serivce - # pfPID - process id used for portforwarding - # - local servicename portpath usetls serviceport - servicename=$1 - portpath=$2 - usetls=${3:-false} - ingress=$4 - - api_url=$(get_service_url "$LOG_NS" "$servicename" "$usetls" $ingress) - - if [ -z "$api_url" ] || [ "$LOG_ALWAYS_PORT_FORWARD" == "true" ]; then - # set up temporary port forwarding to allow curl access - log_debug "Will use Kubernetes port-forwarding to access" - log_debug 
"LOG_ALWAYS_PORT_FORWARD: $LOG_ALWAYS_PORT_FORWARD api_url: $api_url" - - serviceport=$(kubectl -n $LOG_NS get service $servicename -o=jsonpath=$portpath) - log_debug "serviceport: $serviceport" - - # command is sent to run in background - - kubectl -n $LOG_NS port-forward --address localhost svc/$servicename :$serviceport > $tmpfile 2>/dev/null & - - # get PID to allow us to kill process later - pfPID=$! - log_debug "pfPID: $pfPID" - - # pause to allow port-forwarding messages to appear - sleep 5 - - # determine which port port-forwarding is using - pfRegex='Forwarding from .+:([0-9]+)' - myline=$(head -n1 $tmpfile) - - if [[ $myline =~ $pfRegex ]]; then - TEMP_PORT="${BASH_REMATCH[1]}"; - log_debug "TEMP_PORT=${TEMP_PORT}" - else - log_error "Unable to identify the temporary port used for port-forwarding [$servicename]; exiting script."; - return 1 - fi - - if [ "$usetls" == "true" ]; then - protocol="https" - else - protocol="http" - fi - - api_url="$protocol://localhost:$TEMP_PORT" - - fi - log_debug "API Endpoint for [$servicename]: $api_url" + # + # determine URL to access specified API/service + # + # Global vars: api_url - URL to access requested API/serivce + # pfPID - process id used for portforwarding + # + local servicename portpath usetls serviceport + servicename=$1 + portpath=$2 + usetls=${3:-false} + ingress=$4 + + api_url=$(get_service_url "$LOG_NS" "$servicename" "$usetls" "$ingress") + + if [ -z "$api_url" ] || [ "$LOG_ALWAYS_PORT_FORWARD" == "true" ]; then + # set up temporary port forwarding to allow curl access + log_debug "Will use Kubernetes port-forwarding to access" + log_debug "LOG_ALWAYS_PORT_FORWARD: $LOG_ALWAYS_PORT_FORWARD api_url: $api_url" + + serviceport=$(kubectl -n "$LOG_NS" get service "$servicename" -o=jsonpath="$portpath") + log_debug "serviceport: $serviceport" + + # command is sent to run in background + + kubectl -n "$LOG_NS" port-forward --address localhost svc/"$servicename" :"$serviceport" > "$tmpfile" 2> /dev/null & + + 
# get PID to allow us to kill process later + pfPID=$! + log_debug "pfPID: $pfPID" + + # pause to allow port-forwarding messages to appear + sleep 5 + + # determine which port port-forwarding is using + pfRegex='Forwarding from .+:([0-9]+)' + myline=$(head -n1 "$tmpfile") + + if [[ $myline =~ $pfRegex ]]; then + TEMP_PORT="${BASH_REMATCH[1]}" + log_debug "TEMP_PORT=${TEMP_PORT}" + else + log_error "Unable to identify the temporary port used for port-forwarding [$servicename]; exiting script." + return 1 + fi + + if [ "$usetls" == "true" ]; then + protocol="https" + else + protocol="http" + fi + + api_url="$protocol://localhost:$TEMP_PORT" + + fi + log_debug "API Endpoint for [$servicename]: $api_url" } - function get_es_api_url { - # - # obtain ES API/service URL (establish port-forwarding, if necessary) - # - # Global vars: es_api_url - URL to access ES API/serivce - # espfpid - process id of ES portforwarding - - if [ -n "$es_api_url" ]; then - log_debug "Elasticsearch API Endpoint already set [$es_api_url]" - return 0 - fi - - pfPID="" - get_api_url "$ES_SERVICENAME" '{.spec.ports[?(@.name=="http")].port}' true - - rc=$? - - if [ "$rc" == "0" ]; then - es_api_url=$api_url - espfpid=$pfPID - trap_add stop_es_portforwarding EXIT - return 0 - else - log_error "Unable to obtain the URL for the Elasticsearch API Endpoint" - return 1 - fi + # + # obtain ES API/service URL (establish port-forwarding, if necessary) + # + # Global vars: es_api_url - URL to access ES API/serivce + # espfpid - process id of ES portforwarding + + if [ -n "$es_api_url" ]; then + log_debug "Elasticsearch API Endpoint already set [$es_api_url]" + return 0 + fi + + pfPID="" + get_api_url "$ES_SERVICENAME" '{.spec.ports[?(@.name=="http")].port}' true + + rc=$? 
+ + if [ "$rc" == "0" ]; then + es_api_url=$api_url + espfpid=$pfPID + trap_add stop_es_portforwarding EXIT + return 0 + else + log_error "Unable to obtain the URL for the Elasticsearch API Endpoint" + return 1 + fi } function get_kb_api_url { - # - # obtain KB API/service URL (establish port-forwarding, if necessary) - # - # Global vars: kb_api_url - URL to access KB API/service - # kbpfpid - process id of KB portforwarding - - - if [ -n "$kb_api_url" ]; then - log_debug "Kibana API Endpoint already set [$kb_api_url]" - return 0 - fi - - pfPID="" - - tlsrequired="$(kubectl -n $LOG_NS get secret v4m-osd-tls-enabled -o=jsonpath={.data.enable_tls} |base64 --decode)" - log_debug "TLS required to connect to Kibana? [$tlsrequired]" - - get_api_url "$KB_SERVICENAME" '{.spec.ports[?(@.name=="'${KB_SERVICEPORT}'")].port}' $tlsrequired $KB_INGRESSNAME - rc=$? - - if [ "$rc" == "0" ]; then - kb_api_url=$api_url - kbpfpid=$pfPID - trap_add stop_kb_portforwarding EXIT - return 0 - else - log_error "Unable to obtain the URL for the OpenSearch Dashboards API Endpoint" - return 1 - fi + # + # obtain KB API/service URL (establish port-forwarding, if necessary) + # + # Global vars: kb_api_url - URL to access KB API/service + # kbpfpid - process id of KB portforwarding + + if [ -n "$kb_api_url" ]; then + log_debug "Kibana API Endpoint already set [$kb_api_url]" + return 0 + fi + + pfPID="" + + tlsrequired="$(kubectl -n "$LOG_NS" get secret v4m-osd-tls-enabled -o=jsonpath="{.data.enable_tls}" | base64 --decode)" + log_debug "TLS required to connect to Kibana? [$tlsrequired]" + + get_api_url "$KB_SERVICENAME" '{.spec.ports[?(@.name=="'"${KB_SERVICEPORT}"'")].port}' "$tlsrequired" "$KB_INGRESSNAME" + rc=$? 
+ + if [ "$rc" == "0" ]; then + kb_api_url=$api_url + kbpfpid=$pfPID + trap_add stop_kb_portforwarding EXIT + return 0 + else + log_error "Unable to obtain the URL for the OpenSearch Dashboards API Endpoint" + return 1 + fi } function get_sec_api_url { - # - # obtain ODFE Security API/service URL (calls get_es_api_url function, if necessary) - # - # Global vars: sec_api_url - URL to access ODFE Security API/serivce - - if [ -n "$sec_api_url" ]; then - log_debug "Security API Endpoint already set [$sec_api_url]" - return 0 - fi - - get_es_api_url - rc=$? - - if [ "$rc" == "0" ]; then - sec_api_url="${es_api_url}/$ES_PLUGINS_DIR/_security/api" - - log_debug "Security API Endpoint: [$sec_api_url]" - return 0 - else - sec_api_url="" - log_error "Unable to obtain the URL for the Security API Endpoint" - return 1 - fi + # + # obtain ODFE Security API/service URL (calls get_es_api_url function, if necessary) + # + # Global vars: sec_api_url - URL to access ODFE Security API/serivce + + if [ -n "$sec_api_url" ]; then + log_debug "Security API Endpoint already set [$sec_api_url]" + return 0 + fi + + get_es_api_url + rc=$? + + if [ "$rc" == "0" ]; then + sec_api_url="${es_api_url}/$ES_PLUGINS_DIR/_security/api" + + log_debug "Security API Endpoint: [$sec_api_url]" + return 0 + else + sec_api_url="" + log_error "Unable to obtain the URL for the Security API Endpoint" + return 1 + fi } function get_ism_api_url { - # - # obtain Index State Managment API/service URL (calls get_es_api_url function, if necessary) - # - # Global vars: ism_api_url - URL to access ISM API/serivce - - if [ -n "$ism_api_url" ]; then - log_debug "Index Statement Management API Endpoint already set [$ism_api_url]" - return 0 - fi - - get_es_api_url - rc=$? 
- - if [ "$rc" == "0" ]; then - ism_api_url="${es_api_url}/$ES_PLUGINS_DIR/_ism" - - log_debug "Index State Management API Endpoint: [$ism_api_url]" - return 0 - else - ism_api_url="" - log_error "Unable to obtain the URL for the Index State Management API Endpoint" - return 1 - fi + # + # obtain Index State Managment API/service URL (calls get_es_api_url function, if necessary) + # + # Global vars: ism_api_url - URL to access ISM API/serivce + + if [ -n "$ism_api_url" ]; then + log_debug "Index Statement Management API Endpoint already set [$ism_api_url]" + return 0 + fi + + get_es_api_url + rc=$? + + if [ "$rc" == "0" ]; then + ism_api_url="${es_api_url}/$ES_PLUGINS_DIR/_ism" + + log_debug "Index State Management API Endpoint: [$ism_api_url]" + return 0 + else + ism_api_url="" + log_error "Unable to obtain the URL for the Index State Management API Endpoint" + return 1 + fi } - export -f get_ism_api_url get_sec_api_url stop_portforwarding get_es_api_url get_kb_api_url stop_es_portforwarding stop_kb_portforwarding - #initialize "global" vars LOG_ALWAYS_PORT_FORWARD=${LOG_ALWAYS_PORT_FORWARD:-true} export es_api_url kb_api_url espfpid kbpfpid sec_api_url pfPID LOG_ALWAYS_PORT_FORWARD #create a temp file to hold curl response if [ -z "$tmpfile" ]; then - tmpfile=$TMP_DIR/curl_response.txt + tmpfile=$TMP_DIR/curl_response.txt fi diff --git a/logging/bin/change_internal_password.sh b/logging/bin/change_internal_password.sh index a868d8a4..15577c3f 100755 --- a/logging/bin/change_internal_password.sh +++ b/logging/bin/change_internal_password.sh @@ -3,28 +3,30 @@ # Copyright © 2020, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -cd "$(dirname $BASH_SOURCE)/../.." +cd "$(dirname "$BASH_SOURCE")/../.." 
|| exit 1 +# shellcheck disable=SC2034 CHECK_HELM=false + source logging/bin/common.sh source logging/bin/secrets-include.sh source logging/bin/apiaccess-include.sh -this_script=`basename "$0"` +this_script=$(basename "$0") function show_usage { - log_info "" - log_info "Usage: $this_script USERNAME [PASSWORD] " - log_info "" - log_info "Changes the password for one of the special internal user accounts used by other components of the monitoring system to communicate " - log_info "with OpenSearch. In addition, the script upates the internal cache (i.e. corresponding Kubernetes secret) with the new value." - log_info "" - log_info " USERNAME - REQUIRED; the internal username for which the password is be changed; " - log_info " MUST be one of: admin, kibanaserver, logadm, logcollector or metricgetter" - log_info "" - log_info " PASSWORD - OPTIONAL; the new password. If not provided, a random 32-character password will be generated." - log_info " Note: PASSWORD is REQUIRED when USERNAME is 'logadm'." - log_info "" - echo "" + log_info "" + log_info "Usage: $this_script USERNAME [PASSWORD] " + log_info "" + log_info "Changes the password for one of the special internal user accounts used by other components of the monitoring system to communicate " + log_info "with OpenSearch. In addition, the script upates the internal cache (i.e. corresponding Kubernetes secret) with the new value." + log_info "" + log_info " USERNAME - REQUIRED; the internal username for which the password is be changed; " + log_info " MUST be one of: admin, kibanaserver, logadm, logcollector or metricgetter" + log_info "" + log_info " PASSWORD - OPTIONAL; the new password. If not provided, a random 32-character password will be generated." + log_info " Note: PASSWORD is REQUIRED when USERNAME is 'logadm'." 
+ log_info "" + echo "" } # set vars used in curl commands @@ -37,246 +39,239 @@ NEW_PASSWD="${2}" # if no user_name; ERROR and EXIT if [ "$USER_NAME" == "" ]; then - log_error "Required argument [USER_NAME] not provided." - exit 1 + log_error "Required argument [USER_NAME] not provided." + exit 1 else - case "$USER_NAME" in - admin) - ;; - logcollector) - ;; - logadm) - if [ -z "$NEW_PASSWD" ]; then - log_error "No password provided. A new password is REQUIRED when using this script to change the [logadm] account password" - exit 1 - fi - ;; - kibanaserver) - ;; - metricgetter) - ;; - --help|-h) - show_usage - exit - ;; - *) - log_error "The user name [$USER_NAME] you provided is not one of the supported internal users; exiting" - show_usage - exit 2 - ;; - esac + case "$USER_NAME" in + admin) ;; + logcollector) ;; + logadm) + if [ -z "$NEW_PASSWD" ]; then + log_error "No password provided. A new password is REQUIRED when using this script to change the [logadm] account password" + exit 1 + fi + ;; + kibanaserver) ;; + metricgetter) ;; + --help | -h) + show_usage + exit + ;; + *) + log_error "The user name [$USER_NAME] you provided is not one of the supported internal users; exiting" + show_usage + exit 2 + ;; + esac fi - if [ "$NEW_PASSWD" == "" ]; then - # generate password if one not provided - NEW_PASSWD="$(randomPassword)" - autogenerated_password="true" + # generate password if one not provided + NEW_PASSWD="$(randomPassword)" + autogenerated_password="true" fi if [ "$USER_NAME" != "logadm" ]; then - #get current credentials from Kubernetes secret - if [ -z "$(kubectl -n $LOG_NS get secret internal-user-$USER_NAME -o=name 2>/dev/null)" ]; then - log_warn "The Kubernetes secret [internal-user-$USER_NAME], containing credentials for the user, was not found." - # TO DO: How to handle case where secret does not exist? Should never happen. 
- # exit - ES_USER=$USER_NAME - else - ES_USER=$(kubectl -n $LOG_NS get secret internal-user-$USER_NAME -o=jsonpath="{.data.\username}" |base64 --decode) - ES_PASSWD=$(kubectl -n $LOG_NS get secret internal-user-$USER_NAME -o=jsonpath="{.data.password}" |base64 --decode) - fi + #get current credentials from Kubernetes secret + if [ -z "$(kubectl -n "$LOG_NS" get secret internal-user-"$USER_NAME" -o=name 2> /dev/null)" ]; then + log_warn "The Kubernetes secret [internal-user-$USER_NAME], containing credentials for the user, was not found." + # TO DO: How to handle case where secret does not exist? Should never happen. + # exit + ES_USER=$USER_NAME + else + ES_USER=$(kubectl -n "$LOG_NS" get secret internal-user-"$USER_NAME" -o=jsonpath="{.data.\username}" | base64 --decode) + ES_PASSWD=$(kubectl -n "$LOG_NS" get secret internal-user-"$USER_NAME" -o=jsonpath="{.data.password}" | base64 --decode) + fi else - ES_USER=$USER_NAME - ES_PASSWD="do_not_know_current_password" + ES_USER=$USER_NAME + ES_PASSWD="do_not_know_current_password" fi get_sec_api_url # Attempt to change password using current user credentials -response=$(curl -s -o /dev/null -w "%{http_code}" -XPUT "$sec_api_url/account" -H 'Content-Type: application/json' -d'{"current_password" : "'"$ES_PASSWD"'", "password" : "'"$NEW_PASSWD"'"}' --user "$ES_USER:$ES_PASSWD" --insecure) +response=$(curl -s -o /dev/null -w "%{http_code}" -XPUT "$sec_api_url/account" -H 'Content-Type: application/json' -d'{"current_password" : "'"$ES_PASSWD"'", "password" : "'"$NEW_PASSWD"'"}' --user "$ES_USER:$ES_PASSWD" --insecure) if [[ $response == 4* ]]; then - if [ "$USER_NAME" != "logadm" ]; then - log_warn "The currently stored credentials for [$USER_NAME] do NOT appear to be up-to-date; unable to use them to change password. [$response]" - fi + if [ "$USER_NAME" != "logadm" ]; then + log_warn "The currently stored credentials for [$USER_NAME] do NOT appear to be up-to-date; unable to use them to change password. 
[$response]" + fi - if [ "$USER_NAME" != "admin" ]; then + if [ "$USER_NAME" != "admin" ]; then + log_debug "Will attempt to use admin credentials to change password for [$USER_NAME]" - log_debug "Will attempt to use admin credentials to change password for [$USER_NAME]" + ES_ADMIN_USER=$(kubectl -n "$LOG_NS" get secret internal-user-admin -o=jsonpath="{.data.username}" | base64 --decode) + ES_ADMIN_PASSWD=$(kubectl -n "$LOG_NS" get secret internal-user-admin -o=jsonpath="{.data.password}" | base64 --decode) - ES_ADMIN_USER=$(kubectl -n $LOG_NS get secret internal-user-admin -o=jsonpath="{.data.username}" |base64 --decode) - ES_ADMIN_PASSWD=$(kubectl -n $LOG_NS get secret internal-user-admin -o=jsonpath="{.data.password}" |base64 --decode) - - # make sure hash utility is executable - kubectl -n $LOG_NS exec $targetpod -c $targetcontainer -- chmod +x $toolsrootdir/tools/hash.sh - # get hash of new password - hashed_passwd=$(kubectl -n $LOG_NS exec $targetpod -c $targetcontainer -- $toolsrootdir/tools/hash.sh -p $NEW_PASSWD|grep -v '*') - rc=$? - if [ "$rc" == "0" ]; then - - #try changing password using admin password - response=$(curl -s -o /dev/null -w "%{http_code}" -XPATCH "$sec_api_url/internalusers/$ES_USER" -H 'Content-Type: application/json' -d'[{"op" : "replace", "path" : "/hash", "value" : "'"$hashed_passwd"'"}]' --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure) - if [[ "$response" == "404" ]]; then - log_error "Unable to change password for [$USER_NAME] because that user does not exist. [$response]" - success="non-existent_user" - elif [[ $response == 4* ]]; then - log_error "The Kubernetes secret containing credentials for the [admin] user appears to be out-of-date. [$response]" - echo "" - log_error " *********** IMPORTANT NOTE ***********" - log_error "" - log_error " Cached credentials for [admin] user are not valid!" 
- log_error "" - log_error " It is VERY IMPORTANT to ensure the credentials for the [admin] account and the corresponding" - log_error " Kubernetes secret [internal-user-admin] in the [$LOG_NS] namespace are ALWAYS synchronized." - log_error "" - log_error " You MUST re-run this script NOW with the updated password for the [admin] account to update" - log_error " the secret with the current password." - log_error "" - log_error " You may then run this script again to update the password for the [$USER_NAME] account." - echo "" - success="false" - elif [[ $response == 2* ]]; then - log_debug "Password for [$USER_NAME] has been changed in OpenSearch. [$response]" - success="true" - else - log_warn "Unable to change password for [$USER_NAME] using [admin] credentials. [$response]" - success="false" - fi - else - log_error "Unable to obtain a hash of the new password; password not changed. [rc: $rc]"; - fi - else - log_debug "Attempting to change password for user [admin] using the admin certs rather than cached password" - - # make sure hash utility is executable - kubectl -n $LOG_NS exec $targetpod -c $targetcontainer -- chmod +x $toolsrootdir/tools/hash.sh - # get hash of new password - hashed_passwd=$(kubectl -n $LOG_NS exec $targetpod -c $targetcontainer -- $toolsrootdir/tools/hash.sh -p $NEW_PASSWD|grep -v '*') + # get hash of new password + # shellcheck disable=2063 + hashed_passwd=$(kubectl -n "$LOG_NS" exec $targetpod -c $targetcontainer -- $toolsrootdir/tools/hash.sh -p "$NEW_PASSWD" | grep -v '*') + rc=$? 
+ if [ "$rc" == "0" ]; then + #try changing password using admin password + response=$(curl -s -o /dev/null -w "%{http_code}" -XPATCH "$sec_api_url/internalusers/$ES_USER" -H 'Content-Type: application/json' -d'[{"op" : "replace", "path" : "/hash", "value" : "'"$hashed_passwd"'"}]' --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) + if [[ $response == "404" ]]; then + log_error "Unable to change password for [$USER_NAME] because that user does not exist. [$response]" + success="non-existent_user" + elif [[ $response == 4* ]]; then + log_error "The Kubernetes secret containing credentials for the [admin] user appears to be out-of-date. [$response]" + echo "" + log_error " *********** IMPORTANT NOTE ***********" + log_error "" + log_error " Cached credentials for [admin] user are not valid!" + log_error "" + log_error " It is VERY IMPORTANT to ensure the credentials for the [admin] account and the corresponding" + log_error " Kubernetes secret [internal-user-admin] in the [$LOG_NS] namespace are ALWAYS synchronized." + log_error "" + log_error " You MUST re-run this script NOW with the updated password for the [admin] account to update" + log_error " the secret with the current password." + log_error "" + log_error " You may then run this script again to update the password for the [$USER_NAME] account." + echo "" + success="false" + elif [[ $response == 2* ]]; then + log_debug "Password for [$USER_NAME] has been changed in OpenSearch. [$response]" + success="true" + else + log_warn "Unable to change password for [$USER_NAME] using [admin] credentials. [$response]" + success="false" + fi + else + log_error "Unable to obtain a hash of the new password; password not changed. 
[rc: $rc]" + fi + else + log_debug "Attempting to change password for user [admin] using the admin certs rather than cached password" - #obtain admin cert - rm -f $TMP_DIR/tls.crt - admin_tls_cert=$(kubectl -n $LOG_NS get secrets es-admin-tls-secret -o "jsonpath={.data['tls\.crt']}") - if [ -z "$admin_tls_cert" ]; then - log_error "Unable to obtain admin certs from secret [es-admin-tls-secret] in the [$LOG_NS] namespace. Password for [$USER_NAME] has NOT been changed." - success="false" - else - log_debug "File tls.crt obtained from Kubernetes secret" - echo "$admin_tls_cert" |base64 --decode > $TMP_DIR/admin_tls.crt + # get hash of new password + # shellcheck disable=2063 + hashed_passwd=$(kubectl -n "$LOG_NS" exec $targetpod -c $targetcontainer -- $toolsrootdir/tools/hash.sh -p "$NEW_PASSWD" | grep -v '*') - #obtain admin TLS key - rm -f $TMP_DIR/tls.key - admin_tls_key=$(kubectl -n $LOG_NS get secrets es-admin-tls-secret -o "jsonpath={.data['tls\.key']}") - if [ -z "$admin_tls_key" ]; then - log_error "Unable to obtain admin cert key from secret [es-admin-tls-secret] in the [$LOG_NS] namespace. Password for [$USER_NAME] has NOT been changed." + #obtain admin cert + rm -f "$TMP_DIR"/tls.crt + admin_tls_cert=$(kubectl -n "$LOG_NS" get secrets es-admin-tls-secret -o "jsonpath={.data['tls\.crt']}") + if [ -z "$admin_tls_cert" ]; then + log_error "Unable to obtain admin certs from secret [es-admin-tls-secret] in the [$LOG_NS] namespace. Password for [$USER_NAME] has NOT been changed." 
success="false" - else - log_debug "File tls.key obtained from Kubernetes secret" - echo "$admin_tls_key" |base64 --decode > $TMP_DIR/admin_tls.key + else + log_debug "File tls.crt obtained from Kubernetes secret" + echo "$admin_tls_cert" | base64 --decode > "$TMP_DIR"/admin_tls.crt - # Attempt to change password using admin certs - response=$(curl -s -o /dev/null -w "%{http_code}" -XPATCH "$sec_api_url/internalusers/$ES_USER" -H 'Content-Type: application/json' -d'[{"op" : "replace", "path" : "/hash", "value" : "'"$hashed_passwd"'"}]' --cert $TMP_DIR/admin_tls.crt --key $TMP_DIR/admin_tls.key --insecure) - if [[ $response == 2* ]]; then - log_debug "Password for [$USER_NAME] has been changed in OpenSearch. [$response]" - success="true" + #obtain admin TLS key + rm -f "$TMP_DIR"/tls.key + admin_tls_key=$(kubectl -n "$LOG_NS" get secrets es-admin-tls-secret -o "jsonpath={.data['tls\.key']}") + if [ -z "$admin_tls_key" ]; then + log_error "Unable to obtain admin cert key from secret [es-admin-tls-secret] in the [$LOG_NS] namespace. Password for [$USER_NAME] has NOT been changed." + success="false" else - log_warn "Unable to change password for [$USER_NAME] using [admin] certificates. [$response]" - success="false" + log_debug "File tls.key obtained from Kubernetes secret" + echo "$admin_tls_key" | base64 --decode > "$TMP_DIR"/admin_tls.key + + # Attempt to change password using admin certs + response=$(curl -s -o /dev/null -w "%{http_code}" -XPATCH "$sec_api_url/internalusers/$ES_USER" -H 'Content-Type: application/json' -d'[{"op" : "replace", "path" : "/hash", "value" : "'"$hashed_passwd"'"}]' --cert "$TMP_DIR"/admin_tls.crt --key "$TMP_DIR"/admin_tls.key --insecure) + + if [[ $response == 2* ]]; then + log_debug "Password for [$USER_NAME] has been changed in OpenSearch. [$response]" + success="true" + else + log_warn "Unable to change password for [$USER_NAME] using [admin] certificates. 
[$response]" + success="false" + fi fi - fi - fi - fi + fi + fi elif [[ $response == 2* ]]; then - log_debug "Password change response [$response]" - success="true" + log_debug "Password change response [$response]" + success="true" else - log_error "An unexpected problem was encountered while attempting to update password for [$USER_NAME]; password not changed [$response]" - success="false" + log_error "An unexpected problem was encountered while attempting to update password for [$USER_NAME]; password not changed [$response]" + success="false" fi if [ "$success" == "true" ]; then - log_info "Successfully changed the password for [$USER_NAME] in OpenSearch internal database." + log_info "Successfully changed the password for [$USER_NAME] in OpenSearch internal database." - if [ "$USER_NAME" != "logadm" ]; then - log_debug "Trying to store the updated credentials in the corresponding Kubernetes secret [internal-user-$USER_NAME]." + if [ "$USER_NAME" != "logadm" ]; then + log_debug "Trying to store the updated credentials in the corresponding Kubernetes secret [internal-user-$USER_NAME]." - kubectl -n $LOG_NS delete secret internal-user-$USER_NAME --ignore-not-found + kubectl -n "$LOG_NS" delete secret internal-user-"$USER_NAME" --ignore-not-found - labels="managed-by=v4m-es-script" - if [ "$autogenerated_password" == "true" ]; then - labels="$labels autogenerated_password=true" - fi + labels="managed-by=v4m-es-script" + if [ "$autogenerated_password" == "true" ]; then + labels="$labels autogenerated_password=true" + fi - create_user_secret internal-user-$USER_NAME $USER_NAME $NEW_PASSWD "$labels" - rc=$? - if [ "$rc" != "0" ]; then - log_error "IMPORTANT! A Kubernetes secret holding the password for $USER_NAME no longer exists." - log_error "This WILL cause problems when the OpenSearch pods restart." 
- log_error "Try re-running this script again OR manually creating the secret using the command: " - log_error "kubectl -n $LOG_NS create secret generic --from-literal=username=$USER_NAME --from-literal=password=$NEW_PASSWD" - else - case $USER_NAME in - admin) + create_user_secret internal-user-"$USER_NAME" "$USER_NAME" "$NEW_PASSWD" "$labels" + rc=$? + if [ "$rc" != "0" ]; then + log_error "IMPORTANT! A Kubernetes secret holding the password for $USER_NAME no longer exists." + log_error "This WILL cause problems when the OpenSearch pods restart." + log_error "Try re-running this script again OR manually creating the secret using the command: " + log_error "kubectl -n $LOG_NS create secret generic --from-literal=username=$USER_NAME --from-literal=password=$NEW_PASSWD" + else + case $USER_NAME in + admin) - if [ "$autogenerated_password" == "true" ]; then - echo "" - log_notice " *********** IMPORTANT NOTE *********** " - log_notice "" - log_notice " The password for the OpenSearch 'admin' user was changed as requested. " - log_notice " " - log_notice " Since a new password for the 'admin' user was NOT provided, one was " - log_notice " auto-generated for the account. The generated password is shown below. " - log_notice " " - log_notice " Generated 'admin' password: $NEW_PASSWD |" - log_notice " " - log_notice " You can change the password for the 'admin' account at any time, by " - log_notice " re-running this script. " - log_notice " " - log_notice " NOTE: *NEVER* change the password for the 'admin' account from within the " - log_notice " OpenSearch Dashboards web-interface. 
The 'admin' password should *ONLY* be changed via " - log_notice " this script (logging/bin/change_internal_password.sh) " - echo "" - fi - ;; - logcollector) - echo "" - log_notice " *********** IMPORTANT NOTE *********** " - log_notice " " - log_notice " After changing the password for the [logcollector] user, you should restart " - log_notice " the Fluent Bit pods to ensure log collection is not interrupted. " - log_notice " " - log_notice " This can be done by submitting the following command: " - log_notice " kubectl -n $LOG_NS rollout restart daemonset v4m-fb " - echo "" - ;; - kibanaserver) - echo "" - log_notice " *********** IMPORTANT NOTE *********** " - log_notice " " - log_notice " After changing the password for the [kibanaserver] user, you need to restart the " - log_notice " OpenSearch Dashboards pod to ensure OpenSearch Dashboards can still be accessed and used. " - log_notice " " - log_notice " This can be done by submitting the following command: " - log_notice " kubectl -n $LOG_NS delete pods -l 'app=opensearch-dashboards'" - echo "" - ;; - metricgetter) - echo "" - log_notice " *********** IMPORTANT NOTE *********** " - log_notice " " - log_notice " After changing the password for the [metricgetter] user, you should restart the " - log_notice " Elasticsearch Exporter pod to ensure OpenSearch metrics continue to be collected. " - log_notice " " - log_notice " This can be done by submitting the following command: " - log_notice " kubectl -n $LOG_NS rollout restart deployment v4m-es-exporter " - echo "" - ;; - *) - log_error "The user name [$USER_NAME] you provided is not one of the supported internal users; exiting" - exit 2 - esac - fi - fi + if [ "$autogenerated_password" == "true" ]; then + echo "" + log_notice " *********** IMPORTANT NOTE *********** " + log_notice "" + log_notice " The password for the OpenSearch 'admin' user was changed as requested. 
" + log_notice " " + log_notice " Since a new password for the 'admin' user was NOT provided, one was " + log_notice " auto-generated for the account. The generated password is shown below. " + log_notice " " + log_notice " Generated 'admin' password: $NEW_PASSWD |" + log_notice " " + log_notice " You can change the password for the 'admin' account at any time, by " + log_notice " re-running this script. " + log_notice " " + log_notice " NOTE: *NEVER* change the password for the 'admin' account from within the " + log_notice " OpenSearch Dashboards web-interface. The 'admin' password should *ONLY* be changed via " + log_notice " this script (logging/bin/change_internal_password.sh) " + echo "" + fi + ;; + logcollector) + echo "" + log_notice " *********** IMPORTANT NOTE *********** " + log_notice " " + log_notice " After changing the password for the [logcollector] user, you should restart " + log_notice " the Fluent Bit pods to ensure log collection is not interrupted. " + log_notice " " + log_notice " This can be done by submitting the following command: " + log_notice " kubectl -n $LOG_NS rollout restart daemonset v4m-fb " + echo "" + ;; + kibanaserver) + echo "" + log_notice " *********** IMPORTANT NOTE *********** " + log_notice " " + log_notice " After changing the password for the [kibanaserver] user, you need to restart the " + log_notice " OpenSearch Dashboards pod to ensure OpenSearch Dashboards can still be accessed and used. " + log_notice " " + log_notice " This can be done by submitting the following command: " + log_notice " kubectl -n $LOG_NS delete pods -l 'app=opensearch-dashboards'" + echo "" + ;; + metricgetter) + echo "" + log_notice " *********** IMPORTANT NOTE *********** " + log_notice " " + log_notice " After changing the password for the [metricgetter] user, you should restart the " + log_notice " Elasticsearch Exporter pod to ensure OpenSearch metrics continue to be collected. 
" + log_notice " " + log_notice " This can be done by submitting the following command: " + log_notice " kubectl -n $LOG_NS rollout restart deployment v4m-es-exporter " + echo "" + ;; + *) + log_error "The user name [$USER_NAME] you provided is not one of the supported internal users; exiting" + exit 2 + ;; + esac + fi + fi elif [ "$success" == "false" ]; then - log_error "Unable to update the password for user [$USER_NAME] on the OpenSearch pod; original password remains in place." - exit 99 + log_error "Unable to update the password for user [$USER_NAME] on the OpenSearch pod; original password remains in place." + exit 99 fi diff --git a/logging/bin/common.sh b/logging/bin/common.sh index 3fa80e93..e34685d4 100755 --- a/logging/bin/common.sh +++ b/logging/bin/common.sh @@ -1,3 +1,5 @@ +#! /bin/bash + # Copyright © 2020, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 @@ -8,21 +10,23 @@ if [ "$SAS_LOGGING_COMMON_SOURCED" = "" ]; then source bin/common.sh if [ -f "$USER_DIR/logging/user.env" ]; then - userEnv=$(grep -v '^[[:blank:]]*$' $USER_DIR/logging/user.env | grep -v '^#' | xargs) + userEnv=$(grep -v '^[[:blank:]]*$' "$USER_DIR/logging/user.env" | grep -v '^#' | xargs) + log_verbose "Loading user environment file: $USER_DIR/logging/user.env" if [ "$userEnv" ]; then - export $userEnv + # shellcheck disable=SC2086,SC2163 + export $userEnv fi fi #Check for obsolete env var - if [ -n "$LOG_SEARCH_BACKEND" ]; then + if [ -n "$LOG_SEARCH_BACKEND" ]; then log_error "Support for the LOG_SEARCH_BACKEND environment variable has been removed." log_error "This script is only appropriate for use with OpenSearch as the search back-end." 
log_error "The LOG_SEARCH_BACKEND environment variable is currently set to [$LOG_SEARCH_BACKEND]" exit 1 fi - + export LOG_NS="${LOG_NS:-logging}" #if TLS (w/in cluster; for all monitoring components) is requested, require TLS into OSD pod, too @@ -47,7 +51,7 @@ if [ "$SAS_LOGGING_COMMON_SOURCED" = "" ]; then export V4M_NS=$LOG_NS if [ "$AIRGAP_DEPLOYMENT" == "true" ]; then - source bin/airgap-include.sh + source bin/airgap-include.sh fi source bin/version-include.sh @@ -56,4 +60,3 @@ if [ "$SAS_LOGGING_COMMON_SOURCED" = "" ]; then fi echo "" - diff --git a/logging/bin/create_openshift_route.sh b/logging/bin/create_openshift_route.sh index b7802b45..9952c147 100755 --- a/logging/bin/create_openshift_route.sh +++ b/logging/bin/create_openshift_route.sh @@ -3,10 +3,10 @@ # Copyright © 2021, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -cd "$(dirname $BASH_SOURCE)/../.." +cd "$(dirname "$BASH_SOURCE")/../.." || exit 1 source logging/bin/common.sh -this_script=`basename "$0"` +this_script=$(basename "$0") log_debug "Script [$this_script] has started [$(date)]" @@ -15,131 +15,98 @@ log_debug "Script [$this_script] has started [$(date)]" ################################## if [ "$OPENSHIFT_CLUSTER" != "true" ]; then - if [ "${CHECK_OPENSHIFT_CLUSTER:-true}" == "true" ]; then - log_error "This script should only be run on OpenShift clusters" - exit 1 - fi + if [ "${CHECK_OPENSHIFT_CLUSTER:-true}" == "true" ]; then + log_error "This script should only be run on OpenShift clusters" + exit 1 + fi fi app=${1} -app=$(echo "$app"| tr '[:lower:]' '[:upper:]') +app=$(echo "$app" | tr '[:lower:]' '[:upper:]') case "$app" in - "KIBANA"|"KB") - namespace="$LOG_NS" - service_name="v4m-es-kibana-svc" - port="kibana-svc" - tls_enable="true" - tls_secret="kibana-tls-secret" - ingress_tls_secret="kibana-ingress-tls-secret" - route_name="$service_name" - if [ "$OPENSHIFT_PATH_ROUTES" == "true" ]; then - 
route_host=${OPENSHIFT_ROUTE_HOST_KIBANA:-v4m-$namespace.$OPENSHIFT_ROUTE_DOMAIN} - route_path="/kibana" - else - route_host=${OPENSHIFT_ROUTE_HOST_KIBANA:-$service_name-$namespace.$OPENSHIFT_ROUTE_DOMAIN} - route_path="/" - fi - ;; - "ELASTICSEARCH"|"ES") - namespace="$LOG_NS" - service_name="v4m-es-client-service" - port="http" - tls_enable="true" - tls_secret="es-rest-tls-secret" - ingress_tls_secret="elasticsearch-ingress-tls-secret" - route_name="$service_name" - if [ "$OPENSHIFT_PATH_ROUTES" == "true" ]; then - route_host=${OPENSHIFT_ROUTE_HOST_ELASTICSEARCH:-v4m-$namespace.$OPENSHIFT_ROUTE_DOMAIN} - route_path="/elasticsearch" - else - route_host=${OPENSHIFT_ROUTE_HOST_ELASTICSEARCH:-$service_name-$namespace.$OPENSHIFT_ROUTE_DOMAIN} - route_path="/" - fi - ;; - "OPENSEARCH"|"OS") - namespace="$LOG_NS" - service_name="v4m-search" - port="http" - tls_enable="true" - tls_secret="es-rest-tls-secret" - ingress_tls_secret="elasticsearch-ingress-tls-secret" - route_name="$service_name" - if [ "$OPENSHIFT_PATH_ROUTES" == "true" ]; then +"OPENSEARCH" | "OS") + namespace="$LOG_NS" + service_name="v4m-search" + port="http" + tls_enable="true" + tls_secret="es-rest-tls-secret" + ingress_tls_secret="elasticsearch-ingress-tls-secret" + route_name="$service_name" + if [ "$OPENSHIFT_PATH_ROUTES" == "true" ]; then route_host=${OPENSHIFT_ROUTE_HOST_ELASTICSEARCH:-v4m-$namespace.$OPENSHIFT_ROUTE_DOMAIN} route_path="/opensearch" - else + else route_host=${OPENSHIFT_ROUTE_HOST_ELASTICSEARCH:-$service_name-$namespace.$OPENSHIFT_ROUTE_DOMAIN} route_path="/" - fi - ;; - "OSD"|"OPENSEARCHDASHBOARD"|"OPENSEARCHDASHBOARDS") - namespace="$LOG_NS" - service_name="v4m-osd" - port="http" - tls_enable="true" - tls_secret="kibana-tls-secret" - ingress_tls_secret="kibana-ingress-tls-secret" - route_name="$service_name" - if [ "$OPENSHIFT_PATH_ROUTES" == "true" ]; then + fi + ;; +"OSD" | "OPENSEARCHDASHBOARD" | "OPENSEARCHDASHBOARDS") + namespace="$LOG_NS" + service_name="v4m-osd" + 
port="http" + tls_enable="true" + tls_secret="kibana-tls-secret" + ingress_tls_secret="kibana-ingress-tls-secret" + route_name="$service_name" + if [ "$OPENSHIFT_PATH_ROUTES" == "true" ]; then route_host=${OPENSHIFT_ROUTE_HOST_KIBANA:-v4m-$namespace.$OPENSHIFT_ROUTE_DOMAIN} route_path="/dashboards" - else + else route_host=${OPENSHIFT_ROUTE_HOST_KIBANA:-$service_name-$namespace.$OPENSHIFT_ROUTE_DOMAIN} route_path="/" - fi - ;; - ""|*) - log_error "Application name is invalid or missing." - log_error "The APPLICATION NAME is required; valid values are: OpenSearch or OpenSearchDashboards" - exit 1 - ;; + fi + ;; +"" | *) + log_error "Application name is invalid or missing." + log_error "The APPLICATION NAME is required; valid values are: OpenSearch or OpenSearchDashboards" + exit 1 + ;; esac -if oc -n $namespace get route $route_name 2>/dev/null 1>&2; then - log_info "Skipping route creation; the requested route [$route_name] already exists in the namespace [$namespace]." - exit 0 +if oc -n "$namespace" get route $route_name > /dev/null 2>&1; then + log_info "Skipping route creation; the requested route [$route_name] already exists in the namespace [$namespace]." + exit 0 fi if [ "$tls_enable" != "true" ]; then - tls_mode="edge" + tls_mode="edge" else - if oc -n $namespace get secret $tls_secret 2>/dev/null 1>&2; then - tls_mode="reencrypt" - else - log_error "The specified secret [$tls_secret] does NOT exists in the namespace [$namespace]." - exit 1 - fi + if oc -n "$namespace" get secret $tls_secret > /dev/null 2>&1; then + tls_mode="reencrypt" + else + log_error "The specified secret [$tls_secret] does NOT exists in the namespace [$namespace]." + exit 1 + fi fi -oc -n $namespace create route $tls_mode $route_name \ +oc -n "$namespace" create route $tls_mode $route_name \ --service $service_name \ --port=$port \ --insecure-policy=Redirect \ - --hostname $route_host \ + --hostname "$route_host" \ --path $route_path rc=$? 
if [ "$OPENSHIFT_PATH_ROUTES" == "true" ]; then - oc -n $namespace annotate route $route_name "haproxy.router.openshift.io/rewrite-target=/" + oc -n "$namespace" annotate route $route_name "haproxy.router.openshift.io/rewrite-target=/" fi if [ "$rc" != "0" ]; then - log_error "There was a problem creating the route for [$route_name]. [$rc]" - exit 1 + log_error "There was a problem creating the route for [$route_name]. [$rc]" + exit 1 fi if [ "$tls_enable" == "true" ]; then - # identify secret containing destination CA - oc -n $namespace annotate route $route_name cert-utils-operator.redhat-cop.io/destinationCA-from-secret=$tls_secret + # identify secret containing destination CA + oc -n "$namespace" annotate route $route_name cert-utils-operator.redhat-cop.io/destinationCA-from-secret=$tls_secret fi - -if oc -n $namespace get secret $ingress_tls_secret 2>/dev/null 1>&2; then - # Add annotation to identify secret containing TLS certs - oc -n $namespace annotate route $route_name cert-utils-operator.redhat-cop.io/certs-from-secret=$ingress_tls_secret +if oc -n "$namespace" get secret $ingress_tls_secret > /dev/null 2>&1; then + # Add annotation to identify secret containing TLS certs + oc -n "$namespace" annotate route $route_name cert-utils-operator.redhat-cop.io/certs-from-secret=$ingress_tls_secret else - log_debug "The ingress secret [$ingress_tls_secret] does NOT exists, omitting annotation [certs-from-secret]." + log_debug "The ingress secret [$ingress_tls_secret] does NOT exists, omitting annotation [certs-from-secret]." fi log_info "OpenShift Route [$route_name] has been created." diff --git a/logging/bin/deploy_esexporter.sh b/logging/bin/deploy_esexporter.sh index b0a03c68..0ac81f70 100755 --- a/logging/bin/deploy_esexporter.sh +++ b/logging/bin/deploy_esexporter.sh @@ -3,19 +3,19 @@ # Copyright © 2020, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -cd "$(dirname $BASH_SOURCE)/../.." 
+cd "$(dirname "$BASH_SOURCE")/../.." || exit 1 source logging/bin/common.sh source logging/bin/secrets-include.sh -this_script=`basename "$0"` +this_script=$(basename "$0") log_debug "Script [$this_script] has started [$(date)]" ELASTICSEARCH_EXPORTER_ENABLED=${ELASTICSEARCH_EXPORTER_ENABLED:-true} if [ "$ELASTICSEARCH_EXPORTER_ENABLED" != "true" ]; then - log_verbose "Environment variable [ELASTICSEARCH_EXPORTER_ENABLED] is not set to 'true'; exiting WITHOUT deploying Elasticsearch Exporter" - exit + log_verbose "Environment variable [ELASTICSEARCH_EXPORTER_ENABLED] is not set to 'true'; exiting WITHOUT deploying Elasticsearch Exporter" + exit fi set -e @@ -25,48 +25,50 @@ log_info "Deploying Elasticsearch metric exporter ..." # check for pre-reqs # Confirm namespace exists -if [ "$(kubectl get ns $LOG_NS -o name 2>/dev/null)" == "" ]; then - log_error "Namespace [$LOG_NS] does NOT exist." - exit 1 +if [ -z "$(kubectl get ns "$LOG_NS" -o name 2> /dev/null)" ]; then + log_error "Namespace [$LOG_NS] does NOT exist." + exit 1 fi # get credentials get_credentials_from_secret metricgetter rc=$? 
-if [ "$rc" != "0" ] ;then log_debug "RC=$rc"; exit $rc;fi - - -if helm3ReleaseExists es-exporter $LOG_NS; then - #remove an existing instance if it does NOT have the most current set of labels - # NOTE: pod label 'app' changed to 'app.kubernetes.io/name' w/Helm chart 6.x - if [ -z $(kubectl -n $LOG_NS get pods -l "app.kubernetes.io/name=prometheus-elasticsearch-exporter,searchbackend=opensearch" -o name 2>/dev/null) ]; then - log_debug "Removing an outdated version of Helm release [es-exporter]" - helm -n $LOG_NS delete es-exporter - fi - - if kubectl get crd servicemonitors.monitoring.coreos.com 2>1 1>/dev/null; then - #serviceMonitor CRD may not be present if metric monitoring stack is not deployed - monNamespace=$(kubectl get servicemonitor -A --field-selector=metadata.name=elasticsearch -l sas.com/monitoring-base=kube-viya-monitoring -o=custom-columns=NAMESPACE:.metadata.namespace --no-headers) - if [ -n "$monNamespace" ]; then - log_debug "Removing obsolete serviceMonitor [$monNamespace/elasticsearch]" - kubectl delete -n $monNamespace servicemonitor elasticsearch - log_debug "Deploying an updated serviceMonitor for Elasticsearch [$monNamespace/elasticsearch-v2]" - kubectl apply -n $monNamespace -f monitoring/monitors/logging/serviceMonitor-elasticsearch-v2.yaml - else - log_debug "No instance of the obsolete elasticsearch serviceMonitor found." - fi - else - log_debug "No serviceMonitor CRD detected; skipping check for obsolete elasticseach serviceMonitor instance." 
- fi +if [ "$rc" != "0" ]; then + log_debug "RC=$rc" + exit $rc +fi + +if helm3ReleaseExists es-exporter "$LOG_NS"; then + #remove an existing instance if it does NOT have the most current set of labels + # NOTE: pod label 'app' changed to 'app.kubernetes.io/name' w/Helm chart 6.x + if [ -z "$(kubectl -n "$LOG_NS" get pods -l "app.kubernetes.io/name=prometheus-elasticsearch-exporter,searchbackend=opensearch" -o name 2> /dev/null)" ]; then + log_debug "Removing an outdated version of Helm release [es-exporter]" + helm -n "$LOG_NS" delete es-exporter + fi + + if kubectl get crd servicemonitors.monitoring.coreos.com > /dev/null 2>&1; then + #serviceMonitor CRD may not be present if metric monitoring stack is not deployed + monNamespace=$(kubectl get servicemonitor -A --field-selector=metadata.name=elasticsearch -l sas.com/monitoring-base=kube-viya-monitoring -o=custom-columns=NAMESPACE:.metadata.namespace --no-headers) + if [ -n "$monNamespace" ]; then + log_debug "Removing obsolete serviceMonitor [$monNamespace/elasticsearch]" + kubectl delete -n "$monNamespace" servicemonitor elasticsearch + log_debug "Deploying an updated serviceMonitor for Elasticsearch [$monNamespace/elasticsearch-v2]" + kubectl apply -n "$monNamespace" -f monitoring/monitors/logging/serviceMonitor-elasticsearch-v2.yaml + else + log_debug "No instance of the obsolete elasticsearch serviceMonitor found." + fi + else + log_debug "No serviceMonitor CRD detected; skipping check for obsolete elasticseach serviceMonitor instance." + fi else - log_debug "No existing Helm release [es-exporter] found." + log_debug "No existing Helm release [es-exporter] found." 
fi # enable debug on Helm via env var export HELM_DEBUG="${HELM_DEBUG:-false}" if [ "$HELM_DEBUG" == "true" ]; then - helmDebug="--debug" + helmDebug="--debug" fi helmRepoAdd prometheus-community https://prometheus-community.github.io/helm-charts @@ -80,8 +82,8 @@ generateImageKeysFile "$ES_EXPORTER_FULL_IMAGE" "logging/esexporter/es-exporter_ # Load any user customizations/overrides ES_OPEN_EXPORTER_USER_YAML="${ES_OPEN_EXPORTER_USER_YAML:-$USER_DIR/logging/user-values-es-exporter.yaml}" if [ ! -f "$ES_OPEN_EXPORTER_USER_YAML" ]; then - log_debug "[$ES_OPEN_EXPORTER_USER_YAML] not found. Using $TMP_DIR/empty.yaml" - ES_OPEN_EXPORTER_USER_YAML=$TMP_DIR/empty.yaml + log_debug "[$ES_OPEN_EXPORTER_USER_YAML] not found. Using $TMP_DIR/empty.yaml" + ES_OPEN_EXPORTER_USER_YAML=$TMP_DIR/empty.yaml fi # Enable workload node placement? @@ -89,44 +91,43 @@ LOG_NODE_PLACEMENT_ENABLE=${LOG_NODE_PLACEMENT_ENABLE:-${NODE_PLACEMENT_ENABLE:- # Optional workload node placement support if [ "$LOG_NODE_PLACEMENT_ENABLE" == "true" ]; then - log_verbose "Enabling elasticsearch exporter for workload node placement" - wnpValuesFile="logging/node-placement/values-elasticsearch-exporter-wnp.yaml" + log_verbose "Enabling elasticsearch exporter for workload node placement" + wnpValuesFile="logging/node-placement/values-elasticsearch-exporter-wnp.yaml" else - log_debug "Workload node placement support is disabled for the elasticsearch exporter" - wnpValuesFile="$TMP_DIR/empty.yaml" + log_debug "Workload node placement support is disabled for the elasticsearch exporter" + wnpValuesFile="$TMP_DIR/empty.yaml" fi - # Point to OpenShift response file or dummy as appropriate if [ "$OPENSHIFT_CLUSTER" == "true" ]; then - log_verbose "Deploying Elasticsearch metric exporter onto OpenShift cluster" - openshiftValuesFile="logging/openshift/values-elasticsearch-exporter.yaml" + log_verbose "Deploying Elasticsearch metric exporter onto OpenShift cluster" + 
openshiftValuesFile="logging/openshift/values-elasticsearch-exporter.yaml" else - log_debug "Elasticsearch metric exporter is NOT being deployed on OpenShift cluster" - openshiftValuesFile="$TMP_DIR/empty.yaml" + log_debug "Elasticsearch metric exporter is NOT being deployed on OpenShift cluster" + openshiftValuesFile="$TMP_DIR/empty.yaml" fi # Elasticsearch metric exporter -helm2ReleaseCheck es-exporter-$LOG_NS - +helm2ReleaseCheck es-exporter-"$LOG_NS" ## Get Helm Chart Name log_debug "Elasticsearch Exporter Helm Chart: repo [$ESEXPORTER_HELM_CHART_REPO] name [$ESEXPORTER_HELM_CHART_NAME] version [$ESEXPORTER_HELM_CHART_VERSION]" -chart2install="$(get_helmchart_reference $ESEXPORTER_HELM_CHART_REPO $ESEXPORTER_HELM_CHART_NAME $ESEXPORTER_HELM_CHART_VERSION)" -versionstring="$(get_helm_versionstring $ESEXPORTER_HELM_CHART_VERSION)" +chart2install="$(get_helmchart_reference "$ESEXPORTER_HELM_CHART_REPO" "$ESEXPORTER_HELM_CHART_NAME" "$ESEXPORTER_HELM_CHART_VERSION")" +versionstring="$(get_helm_versionstring "$ESEXPORTER_HELM_CHART_VERSION")" log_debug "Installing Helm chart from artifact [$chart2install]" +# shellcheck disable=SC2086 helm $helmDebug upgrade --install es-exporter \ - --namespace $LOG_NS \ - -f $imageKeysFile \ - -f $primaryValuesFile \ - -f $wnpValuesFile \ - -f $openshiftValuesFile \ - -f $ES_OPEN_EXPORTER_USER_YAML \ - --set fullnameOverride=v4m-es-exporter \ - $versionstring \ - $chart2install + --namespace "$LOG_NS" \ + -f "$imageKeysFile" \ + -f "$primaryValuesFile" \ + -f "$wnpValuesFile" \ + -f "$openshiftValuesFile" \ + -f "$ES_OPEN_EXPORTER_USER_YAML" \ + --set fullnameOverride=v4m-es-exporter \ + $versionstring \ + "$chart2install" log_info "Elasticsearch metric exporter has been deployed" diff --git a/logging/bin/deploy_logging.sh b/logging/bin/deploy_logging.sh index 3091140e..707fe02c 100755 --- a/logging/bin/deploy_logging.sh +++ b/logging/bin/deploy_logging.sh @@ -3,20 +3,19 @@ # Copyright © 2022, SAS Institute Inc., Cary, NC, USA. 
All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -cd "$(dirname $BASH_SOURCE)/../.." +cd "$(dirname "$BASH_SOURCE")/../.." || exit 1 source logging/bin/common.sh source bin/autogenerate-include.sh # Confirm NOT on OpenShift if [ "$OPENSHIFT_CLUSTER" == "true" ]; then - if [ "${CHECK_OPENSHIFT_CLUSTER:-true}" == "true" ]; then - log_error "This script should NOT be run on OpenShift clusters" - log_error "Run logging/bin/deploy_logging_openshift.sh instead" - exit 1 - fi + if [ "${CHECK_OPENSHIFT_CLUSTER:-true}" == "true" ]; then + log_error "This script should NOT be run on OpenShift clusters" + log_error "Run logging/bin/deploy_logging_openshift.sh instead" + exit 1 + fi fi - # set flag indicating wrapper/driver script being run export LOGGING_DRIVER=true @@ -27,11 +26,11 @@ export LOGGING_DRIVER=true checkDefaultStorageClass # Create namespace if it doesn't exist -if [ "$(kubectl get ns $LOG_NS -o name 2>/dev/null)" == "" ]; then - kubectl create ns $LOG_NS +if [ -z "$(kubectl get ns "$LOG_NS" -o name 2> /dev/null)" ]; then + kubectl create ns "$LOG_NS" - #Container Security: Disable serviceAccount Token Automounting - disable_sa_token_automount $LOG_NS default + #Container Security: Disable serviceAccount Token Automounting + disable_sa_token_automount "$LOG_NS" default fi log_notice "Deploying logging components to the [$LOG_NS] namespace [$(date)]" @@ -88,22 +87,20 @@ set +e bin/show_app_url.sh OSD OS set -e - ################################## # Version Info # ################################## # If a deployment with the old name exists, remove it first -if helm3ReleaseExists v4m $LOG_NS; then - log_verbose "Removing outdated SAS Viya Monitoring Helm chart release from [$LOG_NS] namespace" - helm uninstall -n "$LOG_NS" "v4m" +if helm3ReleaseExists v4m "$LOG_NS"; then + log_verbose "Removing outdated SAS Viya Monitoring Helm chart release from [$LOG_NS] namespace" + helm uninstall -n "$LOG_NS" "v4m" fi if ! 
deployV4MInfo "$LOG_NS" "v4m-logs"; then - log_warn "Unable to update SAS Viya Monitoring Helm chart release" + log_warn "Unable to update SAS Viya Monitoring Helm chart release" fi - # Write any "notices" to console log_message "" display_notices @@ -111,4 +108,3 @@ display_notices log_message "" log_notice "The deployment of logging components has completed [$(date)]" echo "" - diff --git a/logging/bin/deploy_logging_open.sh b/logging/bin/deploy_logging_open.sh deleted file mode 100755 index 4823b997..00000000 --- a/logging/bin/deploy_logging_open.sh +++ /dev/null @@ -1,11 +0,0 @@ -#! /bin/bash - -# Copyright © 2022, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 - -cd "$(dirname $BASH_SOURCE)/../.." -source logging/bin/common.sh - -log_error "This script is obsolete." -log_error "Run logging/bin/deploy_logging.sh instead." - diff --git a/logging/bin/deploy_logging_openshift.sh b/logging/bin/deploy_logging_openshift.sh index a113581a..e79b4fe7 100755 --- a/logging/bin/deploy_logging_openshift.sh +++ b/logging/bin/deploy_logging_openshift.sh @@ -3,7 +3,7 @@ # Copyright © 2022, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -cd "$(dirname $BASH_SOURCE)/../.." +cd "$(dirname "$BASH_SOURCE")/../.." 
|| exit 1 source logging/bin/common.sh ################################## @@ -11,11 +11,11 @@ source logging/bin/common.sh ################################## if [ "$OPENSHIFT_CLUSTER" != "true" ]; then - if [ "${CHECK_OPENSHIFT_CLUSTER:-true}" == "true" ]; then - log_error "This script should only be run on OpenShift clusters" - log_error "Run logging/bin/deploy_logging.sh instead" - exit 1 - fi + if [ "${CHECK_OPENSHIFT_CLUSTER:-true}" == "true" ]; then + log_error "This script should only be run on OpenShift clusters" + log_error "Run logging/bin/deploy_logging.sh instead" + exit 1 + fi fi # set flag indicating wrapper/driver script being run @@ -28,20 +28,19 @@ export LOGGING_DRIVER=true checkDefaultStorageClass # Create namespace if it doesn't exist -if [ "$(kubectl get ns $LOG_NS -o name 2>/dev/null)" == "" ]; then - kubectl create ns $LOG_NS +if [ -z "$(kubectl get ns "$LOG_NS" -o name 2> /dev/null)" ]; then + kubectl create ns "$LOG_NS" - #Container Security: Disable serviceAccount Token Automounting - disable_sa_token_automount $LOG_NS default - disable_sa_token_automount $LOG_NS builder - disable_sa_token_automount $LOG_NS deployer + #Container Security: Disable serviceAccount Token Automounting + disable_sa_token_automount "$LOG_NS" default + disable_sa_token_automount "$LOG_NS" builder + disable_sa_token_automount "$LOG_NS" deployer fi set -e log_notice "Deploying logging components to the [$LOG_NS] namespace [$(date)]" - ################################## # OpenShift-specific Set-up # ################################## @@ -57,28 +56,24 @@ export LOG_KB_TLS_ENABLE=true log_info "STEP 1: OpenSearch" logging/bin/deploy_opensearch.sh - ################################## # OpenSearch Dashboards (Kibana) # ################################## log_info "STEP 2: OpenSearch Dashboards" logging/bin/deploy_osd.sh - ################################## # Elasticsearch Metric Exporter # ################################## log_info "STEP 3: Elasticsearch metric exporter" 
logging/bin/deploy_esexporter.sh - ################################## # OpenSearch Content (OpenShift) # ################################## log_info "STEP 4: Loading Content into OpenSearch" logging/bin/deploy_opensearch_content.sh - ################################## # OSD Content # ################################## @@ -86,7 +81,6 @@ log_info "STEP 5: Configuring OpenSearch Dashboards" KB_KNOWN_NODEPORT_ENABLE=false logging/bin/deploy_osd_content.sh - ################################## # Fluent Bit - Log Messages # ################################## @@ -99,7 +93,6 @@ logging/bin/deploy_fluentbit_opensearch.sh log_info "STEP 7: Deploying Fluent Bit - K8s Event Collection" logging/bin/deploy_fluentbit_k8sevents_opensearch.sh - ################################## # Create OpenShift Route(s) # # and display app URL(s) # @@ -110,23 +103,22 @@ OPENSHIFT_ROUTES_ENABLE=${OPENSHIFT_ROUTES_ENABLE:-true} if [ "$OPENSHIFT_ROUTES_ENABLE" == "true" ]; then - servicelist="OSD" - logging/bin/create_openshift_route.sh OSD + servicelist="OSD" + logging/bin/create_openshift_route.sh OSD - OPENSHIFT_ES_ROUTE_ENABLE=${OPENSHIFT_ES_ROUTE_ENABLE:-false} - if [ "$OPENSHIFT_ES_ROUTE_ENABLE" == "true" ]; then - logging/bin/create_openshift_route.sh OS OSD + OPENSHIFT_ES_ROUTE_ENABLE=${OPENSHIFT_ES_ROUTE_ENABLE:-false} + if [ "$OPENSHIFT_ES_ROUTE_ENABLE" == "true" ]; then + logging/bin/create_openshift_route.sh OS OSD - servicelist="OS OSD" - fi + servicelist="OS OSD" + fi - bin/show_app_url.sh $servicelist + bin/show_app_url.sh "$servicelist" else - log_info "Environment variable [OPENSHIFT_ROUTES_ENABLE] is not set to 'true'; continuing WITHOUT deploying OpenShift Routes" + log_info "Environment variable [OPENSHIFT_ROUTES_ENABLE] is not set to 'true'; continuing WITHOUT deploying OpenShift Routes" fi - ################################## # Service Monitors # ################################## @@ -134,23 +126,21 @@ log_info "STEP 9: Deploying Service Monitors" export 
DEPLOY_SERVICEMONITORS=${DEPLOY_SERVICEMONITORS:-true} logging/bin/deploy_servicemonitors_openshift.sh - ################################## # Version Info # ################################## log_info "STEP 10: Updating version info" # If a deployment with the old name exists, remove it first -if helm3ReleaseExists v4m $LOG_NS; then - log_verbose "Removing outdated SAS Viya Monitoring Helm chart release from [$LOG_NS] namespace" - helm uninstall -n "$LOG_NS" "v4m" +if helm3ReleaseExists v4m "$LOG_NS"; then + log_verbose "Removing outdated SAS Viya Monitoring Helm chart release from [$LOG_NS] namespace" + helm uninstall -n "$LOG_NS" "v4m" fi if ! deployV4MInfo "$LOG_NS" "v4m-logs"; then - log_warn "Unable to update SAS Viya Monitoring version info" + log_warn "Unable to update SAS Viya Monitoring version info" fi - ################################## # Display Notices # ################################## @@ -158,7 +148,6 @@ fi echo "" display_notices - echo "" log_notice "The deployment of logging components has completed [$(date)]" echo "" diff --git a/logging/bin/deploy_opensearch.sh b/logging/bin/deploy_opensearch.sh index 8b212d67..fecb824f 100755 --- a/logging/bin/deploy_opensearch.sh +++ b/logging/bin/deploy_opensearch.sh @@ -3,22 +3,22 @@ # Copyright © 2022-2025, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -cd "$(dirname $BASH_SOURCE)/../.." +cd "$(dirname "$BASH_SOURCE")/../.." 
|| exit 1 source logging/bin/common.sh source logging/bin/secrets-include.sh source bin/tls-include.sh source bin/autogenerate-include.sh source logging/bin/apiaccess-include.sh -this_script=`basename "$0"` +this_script=$(basename "$0") log_debug "Script [$this_script] has started [$(date)]" ELASTICSEARCH_ENABLE=${ELASTICSEARCH_ENABLE:-true} if [ "$ELASTICSEARCH_ENABLE" != "true" ]; then - log_verbose "Environment variable [ELASTICSEARCH_ENABLE] is not set to 'true'; exiting WITHOUT deploying OpenSearch" - exit 0 + log_verbose "Environment variable [ELASTICSEARCH_ENABLE] is not set to 'true'; exiting WITHOUT deploying OpenSearch" + exit 0 fi autogeneratedYAMLFile="$TMP_DIR/autogenerate-opensearch.yaml" @@ -27,83 +27,81 @@ AUTOGENERATE_STORAGECLASS="${AUTOGENERATE_STORAGECLASS:-false}" if [ "$AUTOGENERATE_STORAGECLASS" == "true" ]; then - if [ ! -f "$autogeneratedYAMLFile" ]; then - log_debug "Creating file [$autogeneratedYAMLFile]" - touch "$autogeneratedYAMLFile" - else - log_debug "File [$autogeneratedYAMLFile] already exists" - fi + if [ ! 
-f "$autogeneratedYAMLFile" ]; then + log_debug "Creating file [$autogeneratedYAMLFile]" + touch "$autogeneratedYAMLFile" + else + log_debug "File [$autogeneratedYAMLFile] already exists" + fi - storageClass="${OPENSEARCH_STORAGECLASS:-$STORAGECLASS}" - checkStorageClass OPENSEARCH_STORAGECLASS "$OPENSEARCH_STORAGECLASS" - sc="$storageClass" yq -i '.persistence.storageClass=env(sc)' "$autogeneratedYAMLFile" + storageClass="${OPENSEARCH_STORAGECLASS:-$STORAGECLASS}" + checkStorageClass OPENSEARCH_STORAGECLASS "$OPENSEARCH_STORAGECLASS" + sc="$storageClass" yq -i '.persistence.storageClass=env(sc)' "$autogeneratedYAMLFile" else - log_debug "Autogeneration of storageClass References NOT enabled" + log_debug "Autogeneration of storageClass References NOT enabled" fi AUTOGENERATE_INGRESS="${AUTOGENERATE_INGRESS:-false}" OPENSEARCH_INGRESS_ENABLE="${OPENSEARCH_INGRESS_ENABLE:-false}" -if [ "$AUTOGENERATE_INGRESS" == "true" ] && [ "$OPENSEARCH_INGRESS_ENABLE"="true" ]; then +if [ "$AUTOGENERATE_INGRESS" == "true" ] && [ "$OPENSEARCH_INGRESS_ENABLE" = "true" ]; then - if [ ! -f "$autogeneratedYAMLFile" ]; then - log_debug "Creating file [$autogeneratedYAMLFile]" - touch "$autogeneratedYAMLFile" - else - log_debug "File [$autogeneratedYAMLFile] already exists" - fi + if [ ! 
-f "$autogeneratedYAMLFile" ]; then + log_debug "Creating file [$autogeneratedYAMLFile]" + touch "$autogeneratedYAMLFile" + else + log_debug "File [$autogeneratedYAMLFile] already exists" + fi - osIngressCert="${OPENSEARCH_INGRESS_CERT}" - osIngressKey="${OPENSEARCH_INGRESS_KEY}" + osIngressCert="${OPENSEARCH_INGRESS_CERT}" + osIngressKey="${OPENSEARCH_INGRESS_KEY}" - create_ingress_certs "$LOG_NS" "elasticsearch-ingress-tls-secret" "$osIngressCert" "$osIngressKey" + create_ingress_certs "$LOG_NS" "elasticsearch-ingress-tls-secret" "$osIngressCert" "$osIngressKey" - ROUTING="${ROUTING:-host}" + ROUTING="${ROUTING:-host}" - ## tested with sample version: 0.2.1 - ingressSampleFile="samples/ingress/${ROUTING}-based-ingress/logging/user-values-opensearch.yaml" + ## tested with sample version: 0.2.1 + ingressSampleFile="samples/ingress/${ROUTING}-based-ingress/logging/user-values-opensearch.yaml" - #intialized the yaml file w/appropriate ingress sample - yq -i eval-all '. as $item ireduce ({}; . * $item )' "$autogeneratedYAMLFile" "$ingressSampleFile" + #intialized the yaml file w/appropriate ingress sample + # shellcheck disable=SC2016 + yq -i eval-all '. as $item ireduce ({}; . 
* $item )' "$autogeneratedYAMLFile" "$ingressSampleFile" - - OPENSEARCH_FQDN="${OPENSEARCH_FQDN}" - OPENSEARCH_PATH="${OPENSEARCH_PATH:-search}" - if [ -z "$OPENSEARCH_FQDN" ]; then - if [ "$ROUTING" == "host" ]; then - OPENSEARCH_FQDN="$OPENSEARCH_PATH.$BASE_DOMAIN" - else - OPENSEARCH_FQDN="$BASE_DOMAIN" - fi - fi + OPENSEARCH_PATH="${OPENSEARCH_PATH:-search}" + if [ -z "$OPENSEARCH_FQDN" ]; then + if [ "$ROUTING" == "host" ]; then + OPENSEARCH_FQDN="$OPENSEARCH_PATH.$BASE_DOMAIN" + else + OPENSEARCH_FQDN="$BASE_DOMAIN" + fi + fi - log_debug "OPENSEARCH_INGRESS_ENABLE [$OPENSEARCH_INGRESS_ENABLE] OPENSEARCH_FQDN [$OPENSEARCH_FQDN] OPENSEARCH_PATH [$OPENSEARCH_PATH]" + log_debug "OPENSEARCH_INGRESS_ENABLE [$OPENSEARCH_INGRESS_ENABLE] OPENSEARCH_FQDN [$OPENSEARCH_FQDN] OPENSEARCH_PATH [$OPENSEARCH_PATH]" - export OPENSEARCH_INGRESS_ENABLE OPENSEARCH_FQDN OPENSEARCH_PATH + export OPENSEARCH_INGRESS_ENABLE OPENSEARCH_FQDN OPENSEARCH_PATH - yq -i '.ingress.enabled=env(OPENSEARCH_INGRESS_ENABLE)' $autogeneratedYAMLFile + yq -i '.ingress.enabled=env(OPENSEARCH_INGRESS_ENABLE)' "$autogeneratedYAMLFile" - if [ "$ROUTING" == "host" ]; then - yq -i '.ingress.hosts.[0]=env(OPENSEARCH_FQDN)' $autogeneratedYAMLFile - yq -i '.ingress.tls.[0].hosts.[0]=env(OPENSEARCH_FQDN)' $autogeneratedYAMLFile - else - slashpath="/$OPENSEARCH_PATH" yq -i '.ingress.path=env(slashpath)' $autogeneratedYAMLFile - yq -i '.ingress.hosts.[0]=env(OPENSEARCH_FQDN)' $autogeneratedYAMLFile + if [ "$ROUTING" == "host" ]; then + yq -i '.ingress.hosts.[0]=env(OPENSEARCH_FQDN)' "$autogeneratedYAMLFile" + yq -i '.ingress.tls.[0].hosts.[0]=env(OPENSEARCH_FQDN)' "$autogeneratedYAMLFile" + else + slashpath="/$OPENSEARCH_PATH" yq -i '.ingress.path=env(slashpath)' "$autogeneratedYAMLFile" + yq -i '.ingress.hosts.[0]=env(OPENSEARCH_FQDN)' "$autogeneratedYAMLFile" - yq -i '.ingress.tls.[0].hosts.[0]=env(OPENSEARCH_FQDN)' $autogeneratedYAMLFile - slashpath="/$OPENSEARCH_PATH" yq -i 
'.ingress.annotations["nginx.ingress.kubernetes.io/rewrite-target"]=env(slashpath)' $autogeneratedYAMLFile + yq -i '.ingress.tls.[0].hosts.[0]=env(OPENSEARCH_FQDN)' "$autogeneratedYAMLFile" + slashpath="/$OPENSEARCH_PATH" yq -i '.ingress.annotations["nginx.ingress.kubernetes.io/rewrite-target"]=env(slashpath)' "$autogeneratedYAMLFile" - # Need to use printf to preserve newlines - printf -v snippet "rewrite (?i)/$OPENSEARCH_PATH/(.*) /\$1 break;\nrewrite (?i)/${OPENSEARCH_PATH}$ / break;" ; - snippet="$snippet" yq -i '.ingress.annotations["nginx.ingress.kubernetes.io/configuration-snippet"]=strenv(snippet)' $autogeneratedYAMLFile + # Need to use printf to preserve newlines + printf -v snippet "rewrite (?i)/%s/(.*) /\$1 break;\nrewrite (?i)/%s$ / break;" "$OPENSEARCH_PATH" "$OPENSEARCH_PATH" + snippet="$snippet" yq -i '.ingress.annotations["nginx.ingress.kubernetes.io/configuration-snippet"]=strenv(snippet)' "$autogeneratedYAMLFile" - fi + fi else - log_debug "Autogeneration of ingresss NOT enabled and/or ingress NOT enabled for OpenSearch" + log_debug "Autogeneration of ingresss NOT enabled and/or ingress NOT enabled for OpenSearch" fi - set -e # @@ -113,18 +111,18 @@ set -e checkDefaultStorageClass # Confirm namespace exists -if [ "$(kubectl get ns $LOG_NS -o name 2>/dev/null)" == "" ]; then - log_error "Namespace [$LOG_NS] does NOT exist." - exit 1 +if [ -z "$(kubectl get ns "$LOG_NS" -o name 2> /dev/null)" ]; then + log_error "Namespace [$LOG_NS] does NOT exist." 
+ exit 1 fi #Generate yaml files with all container-related keys -generateImageKeysFile "$OS_FULL_IMAGE" "logging/opensearch/os_container_image.template" -generateImageKeysFile "$OS_SYSCTL_FULL_IMAGE" "$imageKeysFile" "OS_SYSCTL_" +generateImageKeysFile "$OS_FULL_IMAGE" "logging/opensearch/os_container_image.template" +generateImageKeysFile "$OS_SYSCTL_FULL_IMAGE" "$imageKeysFile" "OS_SYSCTL_" #Copy imageKeysFile since next call will replace existing one cp "$imageKeysFile" "$TMP_DIR/opensearch_imagekeysfile.yaml" -generateImageKeysFile "$OS_FULL_IMAGE" "logging/opensearch/os_initcontainer_image.template" "" "true" +generateImageKeysFile "$OS_FULL_IMAGE" "logging/opensearch/os_initcontainer_image.template" "" "true" # get credentials export ES_ADMIN_PASSWD=${ES_ADMIN_PASSWD} @@ -133,156 +131,154 @@ export ES_LOGCOLLECTOR_PASSWD=${ES_LOGCOLLECTOR_PASSWD} export ES_METRICGETTER_PASSWD=${ES_METRICGETTER_PASSWD} # Create secrets containing internal user credentials -create_user_secret internal-user-admin admin "$ES_ADMIN_PASSWD" managed-by=v4m-es-script -create_user_secret internal-user-kibanaserver kibanaserver "$ES_KIBANASERVER_PASSWD" managed-by=v4m-es-script -create_user_secret internal-user-logcollector logcollector "$ES_LOGCOLLECTOR_PASSWD" managed-by=v4m-es-script -create_user_secret internal-user-metricgetter metricgetter "$ES_METRICGETTER_PASSWD" managed-by=v4m-es-script +create_user_secret internal-user-admin admin "$ES_ADMIN_PASSWD" managed-by=v4m-es-script +create_user_secret internal-user-kibanaserver kibanaserver "$ES_KIBANASERVER_PASSWD" managed-by=v4m-es-script +create_user_secret internal-user-logcollector logcollector "$ES_LOGCOLLECTOR_PASSWD" managed-by=v4m-es-script +create_user_secret internal-user-metricgetter metricgetter "$ES_METRICGETTER_PASSWD" managed-by=v4m-es-script #cert_generator="${CERT_GENERATOR:-openssl}" # Verify cert generator is available (if necessary) -if verify_cert_generator $LOG_NS es-transport es-rest es-admin; then - 
log_debug "cert generator check OK [$cert_generator_ok]" +if verify_cert_generator "$LOG_NS" es-transport es-rest es-admin; then + log_debug "cert generator check OK [$cert_generator_ok]" else - log_error "One or more required TLS certs do not exist and the expected certificate generator mechanism [$CERT_GENERATOR] is not available to create the missing certs" - exit 1 + log_error "One or more required TLS certs do not exist and the expected certificate generator mechanism [$CERT_GENERATOR] is not available to create the missing certs" + exit 1 fi # Create/Get necessary TLS certs -create_tls_certs $LOG_NS logging es-transport es-rest es-admin +create_tls_certs "$LOG_NS" logging es-transport es-rest es-admin # need to wait for cert-manager to create all certs and secrets sleep 10 # Get subject from admin and transport cert for opensearch.yaml -if [ ! -f $TMP_DIR/es-transport.pem ]; then - log_debug "Extracting es-transport cert from secret" - kubectl -n $LOG_NS get secret es-transport-tls-secret -o=jsonpath="{.data.tls\.crt}" |base64 --decode > $TMP_DIR/es-transport.pem +if [ ! -f "$TMP_DIR"/es-transport.pem ]; then + log_debug "Extracting es-transport cert from secret" + kubectl -n "$LOG_NS" get secret es-transport-tls-secret -o=jsonpath="{.data.tls\.crt}" | base64 --decode > "$TMP_DIR"/es-transport.pem fi -node_dn=$(openssl x509 -subject -nameopt RFC2253 -noout -in $TMP_DIR/es-transport.pem | sed -e "s/subject=\s*\(\S*\)/\1/" -e "s/^[ \t]*//") +node_dn=$(openssl x509 -subject -nameopt RFC2253 -noout -in "$TMP_DIR"/es-transport.pem | sed -e "s/subject=\s*\(\S*\)/\1/" -e "s/^[ \t]*//") -if [ ! -f $TMP_DIR/es-admin.pem ]; then - log_debug "Extracting es-admin cert from secret" - kubectl -n $LOG_NS get secret es-admin-tls-secret -o=jsonpath="{.data.tls\.crt}" |base64 --decode > $TMP_DIR/es-admin.pem +if [ ! 
-f "$TMP_DIR"/es-admin.pem ]; then + log_debug "Extracting es-admin cert from secret" + kubectl -n "$LOG_NS" get secret es-admin-tls-secret -o=jsonpath="{.data.tls\.crt}" | base64 --decode > "$TMP_DIR"/es-admin.pem fi -admin_dn=$(openssl x509 -subject -nameopt RFC2253 -noout -in $TMP_DIR/es-admin.pem | sed -e "s/subject=\s*\(\S*\)/\1/" -e "s/^[ \t]*//") +admin_dn=$(openssl x509 -subject -nameopt RFC2253 -noout -in "$TMP_DIR"/es-admin.pem | sed -e "s/subject=\s*\(\S*\)/\1/" -e "s/^[ \t]*//") log_debug "Subjects node_dn:[$node_dn] admin_dn:[$admin_dn]" #write cert subjects to secret to be mounted as env var -kubectl -n $LOG_NS delete secret opensearch-cert-subjects --ignore-not-found -kubectl -n $LOG_NS create secret generic opensearch-cert-subjects --from-literal=node_dn="$node_dn" --from-literal=admin_dn="$admin_dn" -kubectl -n $LOG_NS label secret opensearch-cert-subjects managed-by=v4m-es-script +kubectl -n "$LOG_NS" delete secret opensearch-cert-subjects --ignore-not-found +kubectl -n "$LOG_NS" create secret generic opensearch-cert-subjects --from-literal=node_dn="$node_dn" --from-literal=admin_dn="$admin_dn" +kubectl -n "$LOG_NS" label secret opensearch-cert-subjects managed-by=v4m-es-script # Create ConfigMap for securityadmin script -kubectl -n $LOG_NS delete configmap run-securityadmin.sh --ignore-not-found -kubectl -n $LOG_NS create configmap run-securityadmin.sh --from-file logging/opensearch/bin/run_securityadmin.sh -kubectl -n $LOG_NS label configmap run-securityadmin.sh managed-by=v4m-es-script search-backend=opensearch +kubectl -n "$LOG_NS" delete configmap run-securityadmin.sh --ignore-not-found +kubectl -n "$LOG_NS" create configmap run-securityadmin.sh --from-file logging/opensearch/bin/run_securityadmin.sh +kubectl -n "$LOG_NS" label configmap run-securityadmin.sh managed-by=v4m-es-script search-backend=opensearch # Need to retrieve these from secrets in case secrets pre-existed -export ES_ADMIN_USER=$(kubectl -n $LOG_NS get secret 
internal-user-admin -o=jsonpath="{.data.username}" |base64 --decode) -export ES_ADMIN_PASSWD=$(kubectl -n $LOG_NS get secret internal-user-admin -o=jsonpath="{.data.password}" |base64 --decode) -export ES_METRICGETTER_USER=$(kubectl -n $LOG_NS get secret internal-user-metricgetter -o=jsonpath="{.data.username}" |base64 --decode) -export ES_METRICGETTER_PASSWD=$(kubectl -n $LOG_NS get secret internal-user-metricgetter -o=jsonpath="{.data.password}" |base64 --decode) +ES_ADMIN_USER=$(kubectl -n "$LOG_NS" get secret internal-user-admin -o=jsonpath="{.data.username}" --ignore-not-found | base64 --decode) +ES_ADMIN_PASSWD=$(kubectl -n "$LOG_NS" get secret internal-user-admin -o=jsonpath="{.data.password}" --ignore-not-found | base64 --decode) +ES_METRICGETTER_USER=$(kubectl -n "$LOG_NS" get secret internal-user-metricgetter -o=jsonpath="{.data.username}" --ignore-not-found | base64 --decode) +ES_METRICGETTER_PASSWD=$(kubectl -n "$LOG_NS" get secret internal-user-metricgetter -o=jsonpath="{.data.password}" --ignore-not-found | base64 --decode) +export ES_ADMIN_USER ES_ADMIN_PASSWD ES_METRICGETTER_USER ES_METRICGETTER_PASSWD # Generate message about autogenerated admin password -adminpwd_autogenerated=$(kubectl -n $LOG_NS get secret internal-user-admin -o jsonpath='{.metadata.labels.autogenerated_password}') -if [ ! -z "$adminpwd_autogenerated" ]; then - # Print info about how to obtain admin password - add_notice " " - add_notice "**The OpenSearch 'admin' Account**" - add_notice "Generated 'admin' password: $ES_ADMIN_PASSWD " - add_notice "To change the password for the 'admin' account at any time, run the " - add_notice "following command: " - add_notice " " - add_notice " logging/bin/change_internal_password.sh admin newPassword " - add_notice " " - add_notice "NOTE: *NEVER* change the password for the 'admin' account from within the" - add_notice "OpenSearch Dashboards web-interface. 
The 'admin' password should *ONLY* be changed via " - add_notice "the change_internal_password.sh script in the logging/bin sub-directory." - add_notice " " - - LOGGING_DRIVER=${LOGGING_DRIVER:-false} - if [ "$LOGGING_DRIVER" != "true" ]; then - echo "" - display_notices - echo "" - fi +adminpwd_autogenerated=$(kubectl -n "$LOG_NS" get secret internal-user-admin -o jsonpath='{.metadata.labels.autogenerated_password}') +if [ -n "$adminpwd_autogenerated" ]; then + # Print info about how to obtain admin password + add_notice " " + add_notice "**The OpenSearch 'admin' Account**" + add_notice "Generated 'admin' password: $ES_ADMIN_PASSWD " + add_notice "To change the password for the 'admin' account at any time, run the " + add_notice "following command: " + add_notice " " + add_notice " logging/bin/change_internal_password.sh admin newPassword " + add_notice " " + add_notice "NOTE: *NEVER* change the password for the 'admin' account from within the" + add_notice "OpenSearch Dashboards web-interface. The 'admin' password should *ONLY* be changed via " + add_notice "the change_internal_password.sh script in the logging/bin sub-directory." + add_notice " " + + LOGGING_DRIVER=${LOGGING_DRIVER:-false} + if [ "$LOGGING_DRIVER" != "true" ]; then + echo "" + display_notices + echo "" + fi fi - # enable debug on Helm via env var export HELM_DEBUG="${HELM_DEBUG:-false}" if [ "$HELM_DEBUG" == "true" ]; then - helmDebug="--debug" + helmDebug="--debug" fi -helmRepoAdd opensearch https://opensearch-project.github.io/helm-charts +helmRepoAdd opensearch https://opensearch-project.github.io/helm-charts # Check for existing OpenSearch helm release -if [ "$(helm -n $LOG_NS list --filter 'opensearch' -q)" == "opensearch" ]; then - log_debug "The Helm release [opensearch] already exists; upgrading the release." 
- existingSearch="true" +if [ "$(helm -n "$LOG_NS" list --filter 'opensearch' -q)" == "opensearch" ]; then + log_debug "The Helm release [opensearch] already exists; upgrading the release." + existingSearch="true" else - log_debug "The Helm release [opensearch] does NOT exist; deploying a new release." - existingSearch="false" + log_debug "The Helm release [opensearch] does NOT exist; deploying a new release." + existingSearch="false" fi -helm2ReleaseCheck odfe-$LOG_NS +helm2ReleaseCheck odfe-"$LOG_NS" # Check for existing Open Distro helm release -if [ "$(helm -n $LOG_NS list --filter 'odfe' -q)" == "odfe" ]; then - - log_error "An existing ODFE-based deployment was detected. It must be removed before deploying the current version." - exit 1 - +if [ "$(helm -n "$LOG_NS" list --filter 'odfe' -q)" == "odfe" ]; then + log_error "An existing ODFE-based deployment was detected. It must be removed before deploying the current version." + exit 1 fi # OpenSearch user customizations ES_OPEN_USER_YAML="${ES_OPEN_USER_YAML:-$USER_DIR/logging/user-values-opensearch.yaml}" if [ ! -f "$ES_OPEN_USER_YAML" ]; then - log_debug "[$ES_OPEN_USER_YAML] not found. Using $TMP_DIR/empty.yaml" - ES_OPEN_USER_YAML=$TMP_DIR/empty.yaml + log_debug "[$ES_OPEN_USER_YAML] not found. 
Using $TMP_DIR/empty.yaml" + ES_OPEN_USER_YAML=$TMP_DIR/empty.yaml fi -if [ -z "$(kubectl -n $LOG_NS get secret opensearch-securityconfig -o name 2>/dev/null)" ]; then - - kubectl -n $LOG_NS delete secret opensearch-securityconfig --ignore-not-found - - #Copy OpenSearch Security Configuration files - mkdir -p $TMP_DIR/opensearch/securityconfig - cp logging/opensearch/securityconfig/*.yml $TMP_DIR/opensearch/securityconfig - #Overlay OpenSearch security configuration files from USER_DIR (if exists) - if [ -d "$USER_DIR/logging/opensearch/securityconfig" ]; then - log_debug "OpenSearch Security Configuration directory found w/in USER_DIR [$USER_DIR]" - - if [ "$(ls $USER_DIR/logging/opensearch/securityconfig/*.yml 2>/dev/null)" ]; then - log_info "Copying OpenSearch Security Configuration files from [$USER_DIR/logging/opensearch/securityconfig]" - cp $USER_DIR/logging/opensearch/securityconfig/*.yml $TMP_DIR/opensearch/securityconfig - else - log_debug "No YAML (*.yml) files found in USER_DIR/opensearch/securityconfig directory" - fi - fi - - #create secret containing OpenSearch security configuration yaml files - #NOTE: whitelist.yml file is only created due to apparent bug in OpenSearch - # which causes an ERROR when securityAdmin.sh is run without it - kubectl -n $LOG_NS create secret generic opensearch-securityconfig \ - --from-file $TMP_DIR/opensearch/securityconfig/action_groups.yml \ - --from-file $TMP_DIR/opensearch/securityconfig/allowlist.yml \ - --from-file whitelist.yml=$TMP_DIR/opensearch/securityconfig/allowlist.yml \ - --from-file $TMP_DIR/opensearch/securityconfig/config.yml \ - --from-file $TMP_DIR/opensearch/securityconfig/internal_users.yml \ - --from-file $TMP_DIR/opensearch/securityconfig/nodes_dn.yml \ - --from-file $TMP_DIR/opensearch/securityconfig/roles.yml \ - --from-file $TMP_DIR/opensearch/securityconfig/roles_mapping.yml \ - --from-file $TMP_DIR/opensearch/securityconfig/tenants.yml - - kubectl -n $LOG_NS label secret 
opensearch-securityconfig managed-by=v4m-es-script +if [ -z "$(kubectl -n "$LOG_NS" get secret opensearch-securityconfig -o name 2> /dev/null)" ]; then + + kubectl -n "$LOG_NS" delete secret opensearch-securityconfig --ignore-not-found + + #Copy OpenSearch Security Configuration files + mkdir -p "$TMP_DIR"/opensearch/securityconfig + cp logging/opensearch/securityconfig/*.yml "$TMP_DIR"/opensearch/securityconfig + #Overlay OpenSearch security configuration files from USER_DIR (if exists) + if [ -d "$USER_DIR/logging/opensearch/securityconfig" ]; then + log_debug "OpenSearch Security Configuration directory found w/in USER_DIR [$USER_DIR]" + + if [ "$(ls "$USER_DIR"/logging/opensearch/securityconfig/*.yml 2> /dev/null)" ]; then + log_info "Copying OpenSearch Security Configuration files from [$USER_DIR/logging/opensearch/securityconfig]" + cp "$USER_DIR"/logging/opensearch/securityconfig/*.yml "$TMP_DIR"/opensearch/securityconfig + else + log_debug "No YAML (*.yml) files found in USER_DIR/opensearch/securityconfig directory" + fi + fi + + #create secret containing OpenSearch security configuration yaml files + #NOTE: whitelist.yml file is only created due to apparent bug in OpenSearch + # which causes an ERROR when securityAdmin.sh is run without it + kubectl -n "$LOG_NS" create secret generic opensearch-securityconfig \ + --from-file "$TMP_DIR"/opensearch/securityconfig/action_groups.yml \ + --from-file "$TMP_DIR"/opensearch/securityconfig/allowlist.yml \ + --from-file whitelist.yml="$TMP_DIR"/opensearch/securityconfig/allowlist.yml \ + --from-file "$TMP_DIR"/opensearch/securityconfig/config.yml \ + --from-file "$TMP_DIR"/opensearch/securityconfig/internal_users.yml \ + --from-file "$TMP_DIR"/opensearch/securityconfig/nodes_dn.yml \ + --from-file "$TMP_DIR"/opensearch/securityconfig/roles.yml \ + --from-file "$TMP_DIR"/opensearch/securityconfig/roles_mapping.yml \ + --from-file "$TMP_DIR"/opensearch/securityconfig/tenants.yml + + kubectl -n "$LOG_NS" label secret 
opensearch-securityconfig managed-by=v4m-es-script else - log_verbose "Using existing secret [opensearch-securityconfig] for OpenSearch Security Configuration" + log_verbose "Using existing secret [opensearch-securityconfig] for OpenSearch Security Configuration" fi # OpenSearch @@ -293,11 +289,11 @@ LOG_NODE_PLACEMENT_ENABLE=${LOG_NODE_PLACEMENT_ENABLE:-${NODE_PLACEMENT_ENABLE:- # Optional workload node placement support if [ "$LOG_NODE_PLACEMENT_ENABLE" == "true" ]; then - log_verbose "Enabling OpenSearch for workload node placement" - wnpValuesFile="logging/node-placement/values-opensearch-wnp.yaml" + log_verbose "Enabling OpenSearch for workload node placement" + wnpValuesFile="logging/node-placement/values-opensearch-wnp.yaml" else - log_debug "Workload node placement support is disabled for OpenSearch" - wnpValuesFile="$TMP_DIR/empty.yaml" + log_debug "Workload node placement support is disabled for OpenSearch" + wnpValuesFile="$TMP_DIR/empty.yaml" fi OPENSHIFT_SPECIFIC_YAML=$TMP_DIR/empty.yaml @@ -305,25 +301,23 @@ if [ "$OPENSHIFT_CLUSTER" == "true" ]; then OPENSHIFT_SPECIFIC_YAML=logging/openshift/values-opensearch-openshift.yaml fi - # YAML file container auto-generated ingress definitions (or not) if [ ! -f "$autogeneratedYAMLFile" ]; then - log_debug "[$autogeneratedYAMLFile] not found. Using $TMP_DIR/empty.yaml" - autogeneratedYAMLFile="$TMP_DIR/empty.yaml" + log_debug "[$autogeneratedYAMLFile] not found. 
Using $TMP_DIR/empty.yaml" + autogeneratedYAMLFile="$TMP_DIR/empty.yaml" fi - # Get Helm Chart Name log_debug "OpenSearch Helm Chart: repo [$OPENSEARCH_HELM_CHART_REPO] name [$OPENSEARCH_HELM_CHART_NAME] version [$OPENSEARCH_HELM_CHART_VERSION]" -chart2install="$(get_helmchart_reference $OPENSEARCH_HELM_CHART_REPO $OPENSEARCH_HELM_CHART_NAME $OPENSEARCH_HELM_CHART_VERSION)" -versionstring="$(get_helm_versionstring $OPENSEARCH_HELM_CHART_VERSION)" +chart2install="$(get_helmchart_reference "$OPENSEARCH_HELM_CHART_REPO" "$OPENSEARCH_HELM_CHART_NAME" "$OPENSEARCH_HELM_CHART_VERSION")" +versionstring="$(get_helm_versionstring "$OPENSEARCH_HELM_CHART_VERSION")" log_debug "Installing Helm chart from artifact [$chart2install]" - # Deploy OpenSearch via Helm chart # NOTE: nodeGroup needed to get resource names we want +# shellcheck disable=SC2086 helm $helmDebug upgrade --install opensearch \ - --namespace $LOG_NS \ + --namespace "$LOG_NS" \ --values "$TMP_DIR/opensearch_imagekeysfile.yaml" \ --values "$imageKeysFile" \ --values logging/opensearch/opensearch_helm_values.yaml \ @@ -331,39 +325,37 @@ helm $helmDebug upgrade --install opensearch \ --values "$autogeneratedYAMLFile" \ --values "$ES_OPEN_USER_YAML" \ --values "$OPENSHIFT_SPECIFIC_YAML" \ - --set nodeGroup=primary \ + --set nodeGroup=primary \ --set masterService=v4m-search \ --set fullnameOverride=v4m-search \ $versionstring \ - $chart2install + "$chart2install" # waiting for PVCs to be bound declare -i pvcCounter=0 -pvc_status=$(kubectl -n $LOG_NS get pvc v4m-search-v4m-search-0 -o=jsonpath="{.status.phase}") -until [ "$pvc_status" == "Bound" ] || (( $pvcCounter>90 )); -do - sleep 5 - pvcCounter=$((pvcCounter+5)) - pvc_status=$(kubectl -n $LOG_NS get pvc v4m-search-v4m-search-0 -o=jsonpath="{.status.phase}") +pvc_status=$(kubectl -n "$LOG_NS" get pvc v4m-search-v4m-search-0 -o=jsonpath="{.status.phase}") +until [ "$pvc_status" == "Bound" ] || ((pvcCounter > 90)); do + sleep 5 + pvcCounter=$((pvcCounter + 5)) + 
pvc_status=$(kubectl -n "$LOG_NS" get pvc v4m-search-v4m-search-0 -o=jsonpath="{.status.phase}") done # Confirm PVC is "bound" (matched) to PV -pvc_status=$(kubectl -n $LOG_NS get pvc v4m-search-v4m-search-0 -o=jsonpath="{.status.phase}") -if [ "$pvc_status" != "Bound" ]; then - log_error "It appears that the PVC [v4m-search-v4m-search-0] associated with the [v4m-search-0] node has not been bound to a PV." - log_error "The status of the PVC is [$pvc_status]" - log_error "After ensuring all claims shown as Pending can be satisfied; run the remove_opensearch.sh script and try again." - exit 1 +pvc_status=$(kubectl -n "$LOG_NS" get pvc v4m-search-v4m-search-0 -o=jsonpath="{.status.phase}") +if [ "$pvc_status" != "Bound" ]; then + log_error "It appears that the PVC [v4m-search-v4m-search-0] associated with the [v4m-search-0] node has not been bound to a PV." + log_error "The status of the PVC is [$pvc_status]" + log_error "After ensuring all claims shown as Pending can be satisfied; run the remove_opensearch.sh script and try again." + exit 1 fi log_verbose "The PVC [v4m-search-v4m-search-0] have been bound to PVs" # Need to wait 2-3 minutes for the OpenSearch to come up and running log_info "Waiting on OpenSearch pods to be Ready" -kubectl -n $LOG_NS wait pods v4m-search-0 --for=condition=Ready --timeout=10m - +kubectl -n "$LOG_NS" wait pods v4m-search-0 --for=condition=Ready --timeout=10m # TO DO: Convert to curl command to detect ES is up? -# hitting https:/host:port -u adminuser:adminpwd --insecure +# hitting https:/host:port -u adminuser:adminpwd --insecure # returns "OpenDistro Security not initialized." 
and 503 when up log_verbose "Waiting [2] minutes to allow OpenSearch to initialize [$(date)]" sleep 120 @@ -372,27 +364,27 @@ set +e # Run the security admin script on the pod # Add some logic to find ES release -if [ "$existingSearch" == "false" ] ; then - kubectl -n $LOG_NS exec v4m-search-0 -c opensearch -- config/run_securityadmin.sh - # Retrieve log file from security admin script - kubectl -n $LOG_NS cp v4m-search-0:config/run_securityadmin.log $TMP_DIR/run_securityadmin.log -c opensearch - if [ "$(tail -n1 $TMP_DIR/run_securityadmin.log)" == "Done with success" ]; then - log_verbose "The run_securityadmin.log script appears to have run successfully; you can review its output below:" - else - log_warn "There may have been a problem with the run_securityadmin.log script; review the output below:" - fi - # show output from run_securityadmin.sh script - sed 's/^/ | /' $TMP_DIR/run_securityadmin.log +if [ "$existingSearch" == "false" ]; then + kubectl -n "$LOG_NS" exec v4m-search-0 -c opensearch -- config/run_securityadmin.sh + # Retrieve log file from security admin script + kubectl -n "$LOG_NS" cp v4m-search-0:config/run_securityadmin.log "$TMP_DIR"/run_securityadmin.log -c opensearch + if [ "$(tail -n1 "$TMP_DIR"/run_securityadmin.log)" == "Done with success" ]; then + log_verbose "The run_securityadmin.log script appears to have run successfully; you can review its output below:" + else + log_warn "There may have been a problem with the run_securityadmin.log script; review the output below:" + fi + # show output from run_securityadmin.sh script + sed 's/^/ | /' "$TMP_DIR"/run_securityadmin.log else - log_verbose "Existing OpenSearch release found. Skipping OpenSearch security initialization." + log_verbose "Existing OpenSearch release found. Skipping OpenSearch security initialization." 
fi set -e #Container Security: Disable serviceAccount Token Automounting if [ "$OPENSHIFT_CLUSTER" == "true" ]; then - disable_sa_token_automount $LOG_NS v4m-os - #NOTE: On other providers, OpenSearch pods linked to the 'default' serviceAccount + disable_sa_token_automount "$LOG_NS" v4m-os + #NOTE: On other providers, OpenSearch pods linked to the 'default' serviceAccount fi log_info "OpenSearch has been deployed" diff --git a/logging/bin/deploy_opensearch_content.sh b/logging/bin/deploy_opensearch_content.sh index d0976b30..0302e6dc 100755 --- a/logging/bin/deploy_opensearch_content.sh +++ b/logging/bin/deploy_opensearch_content.sh @@ -3,21 +3,21 @@ # Copyright © 2022, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -cd "$(dirname $BASH_SOURCE)/../.." +cd "$(dirname "$BASH_SOURCE")/../.." || exit 1 source logging/bin/common.sh source logging/bin/secrets-include.sh source logging/bin/apiaccess-include.sh source logging/bin/rbac-include.sh -this_script=`basename "$0"` +this_script=$(basename "$0") log_debug "Script [$this_script] has started [$(date)]" ES_CONTENT_DEPLOY=${ES_CONTENT_DEPLOY:-${ELASTICSEARCH_ENABLE:-true}} if [ "$ES_CONTENT_DEPLOY" != "true" ]; then - log_verbose "Environment variable [ES_CONTENT_DEPLOY] is not set to 'true'; exiting WITHOUT deploying content into OpenSearch" - exit 0 + log_verbose "Environment variable [ES_CONTENT_DEPLOY] is not set to 'true'; exiting WITHOUT deploying content into OpenSearch" + exit 0 fi log_info "Loading Content into OpenSearch" @@ -30,198 +30,203 @@ set -e # check for pre-reqs # Confirm namespace exists -if [ "$(kubectl get ns $LOG_NS -o name 2>/dev/null)" == "" ]; then - log_error "Namespace [$LOG_NS] does NOT exist." - exit 1 +if [ -z "$(kubectl get ns "$LOG_NS" -o name 2> /dev/null)" ]; then + log_error "Namespace [$LOG_NS] does NOT exist." + exit 1 fi - # get credentials get_credentials_from_secret admin rc=$? 
-if [ "$rc" != "0" ] ;then log_debug "RC=$rc"; exit $rc;fi +if [ "$rc" != "0" ]; then + log_debug "RC=$rc" + exit $rc +fi get_ism_api_url # Confirm OpenSearch is ready -for pause in 30 30 30 30 30 30 60 -do - response=$(curl -s -o /dev/null -w "%{http_code}" -XGET "$es_api_url" --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure) - # returns 503 (and outputs "Open Distro Security not initialized.") when ODFE isn't ready yet - # TO DO: check for 503 specifically? - - if [[ $response != 2* ]]; then - log_verbose "The OpenSearch REST endpoint does not appear to be quite ready [$response]; sleeping for [$pause] more seconds before checking again." - sleep ${pause} - else - log_debug "The OpenSearch REST endpoint appears to be ready...continuing" - esready="TRUE" - break - fi +for pause in 30 30 30 30 30 30 60; do + response=$(curl -s -o /dev/null -w "%{http_code}" -XGET "$es_api_url" --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) + # returns 503 (and outputs "Open Distro Security not initialized.") when ODFE isn't ready yet + # TO DO: check for 503 specifically? + + if [[ $response != 2* ]]; then + log_verbose "The OpenSearch REST endpoint does not appear to be quite ready [$response]; sleeping for [$pause] more seconds before checking again." + sleep ${pause} + else + log_debug "The OpenSearch REST endpoint appears to be ready...continuing" + esready="TRUE" + break + fi done if [ "$esready" != "TRUE" ]; then - log_error "The OpenSearch REST endpoint has NOT become accessible in the expected time; exiting." - log_error "Review the OpenSearch pod's events and log to identify the issue and resolve it before trying again." - exit 1 + log_error "The OpenSearch REST endpoint has NOT become accessible in the expected time; exiting." + log_error "Review the OpenSearch pod's events and log to identify the issue and resolve it before trying again." 
+ exit 1 fi - # Create Index Management (I*M) Policy objects function set_retention_period { - #Arguments - policy_name=$1 # Name of policy...also, used to construct name of json file to load - retention_period_var=$2 # Name of env var that can be used to specify retention period + #Arguments + # policy_name Name of policy...also, used to construct name of json file to load + # retention_period_var Name of env var that can be used to specify retention period + + policy_name=$1 + retention_period_var=$2 - log_debug "Function called: set_retention_perid ARGS: $@" + # shellcheck disable=2145 + log_debug "Function called: set_retention_perid ARGS: $@" - retention_period=${!retention_period_var} # Retention Period (unit: days) + retention_period=${!retention_period_var} # Retention Period (unit: days) - digits_re='^[0-9]+$' + digits_re='^[0-9]+$' - cp logging/opensearch/${policy_name}.json $TMP_DIR/$policy_name.json + cp logging/opensearch/"${policy_name}".json "$TMP_DIR"/"$policy_name".json - # confirm value is number - if ! [[ $retention_period =~ $digits_re ]]; then - log_error "An invalid valid was provided for [$retention_period_var]; exiting." - exit 1 - fi + # confirm value is number + if ! [[ $retention_period =~ $digits_re ]]; then + log_error "An invalid valid was provided for [$retention_period_var]; exiting." 
+ exit 1 + fi - #Update retention period in json file prior to loading it - sed -i'.bak' "s/\"min_index_age\": \"xxxRETENTION_PERIODxxx\"/\"min_index_age\": \"${retention_period}d\"/g" $TMP_DIR/$policy_name.json + #Update retention period in json file prior to loading it + sed -i'.bak' "s/\"min_index_age\": \"xxxRETENTION_PERIODxxx\"/\"min_index_age\": \"${retention_period}d\"/g" "$TMP_DIR"/"$policy_name".json - log_debug "Contents of $policy_name.json after substitution:" - log_debug "$(cat $TMP_DIR/${policy_name}.json)" + log_debug "Contents of $policy_name.json after substitution:" + log_debug "$(cat "$TMP_DIR"/"${policy_name}".json)" - # Load policy into OpenSearch via API - response=$(curl -s -o /dev/null -w "%{http_code}" -XPUT "$ism_api_url/policies/$policy_name" -H 'Content-Type: application/json' -d @$TMP_DIR/$policy_name.json --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure) - if [[ $response == 409 ]]; then - log_info "The index management policy [$policy_name] already exist in OpenSearch; skipping load and using existing policy." - elif [[ $response != 2* ]]; then - log_error "There was an issue loading index management policy [$policy_name] into OpenSearch [$response]" - exit 1 - else - log_debug "Index management policy [$policy_name] loaded into OpenSearch [$response]" - fi + # Load policy into OpenSearch via API + response=$(curl -s -o /dev/null -w "%{http_code}" -XPUT "$ism_api_url/policies/$policy_name" -H 'Content-Type: application/json' -d @"$TMP_DIR"/"$policy_name".json --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) + if [[ $response == 409 ]]; then + log_info "The index management policy [$policy_name] already exist in OpenSearch; skipping load and using existing policy." 
+ elif [[ $response != 2* ]]; then + log_error "There was an issue loading index management policy [$policy_name] into OpenSearch [$response]" + exit 1 + else + log_debug "Index management policy [$policy_name] loaded into OpenSearch [$response]" + fi } #Patch ODFE 1.7.0 ISM policies to ODFE 1.13.x format function add_ism_template { - local policy_name pattern - - #Arguments - policy_name=$1 # Name of policy - pattern=$2 # Index pattern to associate with policy - priority=${3:-100} # Index Priority (Higher values ==> reloaded first) - - response=$(curl -s -o $TMP_DIR/ism_policy_patch.json -w "%{http_code}" -XGET "$ism_api_url/policies/$policy_name" --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure) - if [[ $response != 2* ]]; then - log_debug "No ISM policy [$policy_name] found to patch; moving on.[$response]" - return - fi - - if [ -n "$(cat $TMP_DIR/ism_policy_patch.json |grep '"ism_template":null')" ]; then - log_debug "No ISM Template on policy [$policy_name]; adding one." - - #remove crud returned but not needed - sed -i'.bak' "s/\"_id\":\"${policy_name}\",//;s/\"_version\":[0-9]*,//;s/\"_seq_no\":[0-9]*,//;s/\"_primary_term\":[0-9]*,//" $TMP_DIR/ism_policy_patch.json - - #add ISM_Template to existing ISM policy - sed -i'.bak' "s/\"ism_template\":null/\"ism_template\": {\"index_patterns\": \[\"${pattern}\"\],\"priority\":${priority}}/g" $TMP_DIR/ism_policy_patch.json - - #delete exisiting policy - response=$(curl -s -o /dev/null -w "%{http_code}" -XDELETE "$ism_api_url/policies/$policy_name" --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure) - if [[ $response != 2* ]]; then - log_warn "Error encountered deleting index management policy [$policy_name] before patching to add ISM template stanza [$response]." - log_warn "Review the index managment policy [$policy_name] within OpenSearch Dashboards to ensure it is properly configured and linked to appropriate indexes [$pattern]." - return - else - log_debug "Index policy [$policy_name] deleted [$response]." 
- fi - - #handle change in policy name w/ our 1.1.0 release - if [ "$policy_name" == "viya_infra_idxmgmt_policy" ]; then - sed -i'.bak' "s/viya_infra_idxmgmt_policy/viya-infra-idxmgmt-policy/g" $TMP_DIR/ism_policy_patch.json - policy_name="viya-infra-idxmgmt-policy" - fi - - - #load revised policy - response=$(curl -s -o /dev/null -w "%{http_code}" -XPUT "$ism_api_url/policies/$policy_name" -H 'Content-Type: application/json' -d "@$TMP_DIR/ism_policy_patch.json" --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure) - if [[ $response != 2* ]]; then - log_warn "Unable to update index management policy [$policy_name] to add a ISM_TEMPLATE stanza [$response]" - log_warn "Review/create the index managment policy [$policy_name] within OpenSearch Dashboards to ensure it is properly configured and linked to appropriate indexes [$pattern]." - return - else - log_info "Index management policy [$policy_name] loaded into OpenSearch [$response]" - fi - else - log_debug "The policy definition for [$policy_name] already includes an ISM Template stanza; no need to patch." - return - fi + local policy_name pattern + + #Arguments + # policy_name Name of policy + # pattern Index pattern to associate with policy + # priority Index Priority (Higher values ==> reloaded first) + + policy_name=$1 + pattern=$2 + priority=${3:-100} + + response=$(curl -s -o "$TMP_DIR"/ism_policy_patch.json -w "%{http_code}" -XGET "$ism_api_url/policies/$policy_name" --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) + if [[ $response != 2* ]]; then + log_debug "No ISM policy [$policy_name] found to patch; moving on.[$response]" + return + fi + + if grep -q '"ism_template":null' "$TMP_DIR"/ism_policy_patch.json; then + log_debug "No ISM Template on policy [$policy_name]; adding one." 
+ + #remove crud returned but not needed + sed -i'.bak' "s/\"_id\":\"${policy_name}\",//;s/\"_version\":[0-9]*,//;s/\"_seq_no\":[0-9]*,//;s/\"_primary_term\":[0-9]*,//" "$TMP_DIR"/ism_policy_patch.json + + #add ISM_Template to existing ISM policy + sed -i'.bak' "s/\"ism_template\":null/\"ism_template\": {\"index_patterns\": \[\"${pattern}\"\],\"priority\":${priority}}/g" "$TMP_DIR"/ism_policy_patch.json + + #delete exisiting policy + response=$(curl -s -o /dev/null -w "%{http_code}" -XDELETE "$ism_api_url/policies/$policy_name" --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) + if [[ $response != 2* ]]; then + log_warn "Error encountered deleting index management policy [$policy_name] before patching to add ISM template stanza [$response]." + log_warn "Review the index managment policy [$policy_name] within OpenSearch Dashboards to ensure it is properly configured and linked to appropriate indexes [$pattern]." + return + else + log_debug "Index policy [$policy_name] deleted [$response]." + fi + + #handle change in policy name w/ our 1.1.0 release + if [ "$policy_name" == "viya_infra_idxmgmt_policy" ]; then + sed -i'.bak' "s/viya_infra_idxmgmt_policy/viya-infra-idxmgmt-policy/g" "$TMP_DIR"/ism_policy_patch.json + policy_name="viya-infra-idxmgmt-policy" + fi + + #load revised policy + response=$(curl -s -o /dev/null -w "%{http_code}" -XPUT "$ism_api_url/policies/$policy_name" -H 'Content-Type: application/json' -d "@$TMP_DIR/ism_policy_patch.json" --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) + if [[ $response != 2* ]]; then + log_warn "Unable to update index management policy [$policy_name] to add a ISM_TEMPLATE stanza [$response]" + log_warn "Review/create the index managment policy [$policy_name] within OpenSearch Dashboards to ensure it is properly configured and linked to appropriate indexes [$pattern]." 
+ return + else + log_info "Index management policy [$policy_name] loaded into OpenSearch [$response]" + fi + else + log_debug "The policy definition for [$policy_name] already includes an ISM Template stanza; no need to patch." + return + fi } - LOG_RETENTION_PERIOD="${LOG_RETENTION_PERIOD:-3}" set_retention_period viya_logs_idxmgmt_policy LOG_RETENTION_PERIOD -add_ism_template "viya_logs_idxmgmt_policy" "viya_logs-*" 100 +add_ism_template "viya_logs_idxmgmt_policy" "viya_logs-*" 100 # Create Ingest Pipeline to "burst" incoming log messages to separate indexes based on namespace -response=$(curl -s -o /dev/null -w "%{http_code}" -XPUT "$es_api_url/_ingest/pipeline/viyaburstns" -H 'Content-Type: application/json' -d @logging/opensearch/create_ns_burst_pipeline.json --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure) +response=$(curl -s -o /dev/null -w "%{http_code}" -XPUT "$es_api_url/_ingest/pipeline/viyaburstns" -H 'Content-Type: application/json' -d @logging/opensearch/create_ns_burst_pipeline.json --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) # request returns: {"acknowledged":true} if [[ $response != 2* ]]; then - log_error "There was an issue loading ingest pipeline into OpenSearch [$response]" - exit 1 + log_error "There was an issue loading ingest pipeline into OpenSearch [$response]" + exit 1 else - log_debug "Ingest pipeline definition loaded into OpenSearch [$response]" + log_debug "Ingest pipeline definition loaded into OpenSearch [$response]" fi # Configure index template settings and link Ingest Pipeline to Index Template -response=$(curl -s -o /dev/null -w "%{http_code}" -XPUT "$es_api_url/_template/viya-logs-template" -H 'Content-Type: application/json' -d @logging/opensearch/set_index_template_settings_logs.json --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure ) +response=$(curl -s -o /dev/null -w "%{http_code}" -XPUT "$es_api_url/_template/viya-logs-template" -H 'Content-Type: application/json' -d 
@logging/opensearch/set_index_template_settings_logs.json --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) # request returns: {"acknowledged":true} if [[ $response != 2* ]]; then - log_error "There was an issue loading index template settings into OpenSearch [$response]" - exit 1 + log_error "There was an issue loading index template settings into OpenSearch [$response]" + exit 1 else - log_debug "Index template settings loaded into OpenSearch [$response]" + log_debug "Index template settings loaded into OpenSearch [$response]" fi if [ "$OPENSHIFT_CLUSTER" == "true" ]; then - # INFRASTRUCTURE LOGS - # Handle "infrastructure" logs differently - INFRA_LOG_RETENTION_PERIOD="${INFRA_LOG_RETENTION_PERIOD:-1}" - set_retention_period viya_infra_idxmgmt_policy INFRA_LOG_RETENTION_PERIOD - add_ism_template "viya_infra_idxmgmt_policy" "viya_logs-openshift-*" 5 - - # Link index management policy Index Template - response=$(curl -s -o /dev/null -w "%{http_code}" -XPUT "$es_api_url/_template/viya-infra-template" -H 'Content-Type: application/json' -d @logging/opensearch/set_index_template_settings_infra_openshift.json --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure ) - # request returns: {"acknowledged":true} - if [[ $response != 2* ]]; then - log_error "There was an issue loading infrastructure index template settings into OpenSearch [$response]" - exit 1 - else - log_info "Infrastructure index template settings loaded into OpenSearch [$response]" - fi + # INFRASTRUCTURE LOGS + # Handle "infrastructure" logs differently + INFRA_LOG_RETENTION_PERIOD="${INFRA_LOG_RETENTION_PERIOD:-1}" + set_retention_period viya_infra_idxmgmt_policy INFRA_LOG_RETENTION_PERIOD + add_ism_template "viya_infra_idxmgmt_policy" "viya_logs-openshift-*" 5 + + # Link index management policy Index Template + response=$(curl -s -o /dev/null -w "%{http_code}" -XPUT "$es_api_url/_template/viya-infra-template" -H 'Content-Type: application/json' -d 
@logging/opensearch/set_index_template_settings_infra_openshift.json --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) + # request returns: {"acknowledged":true} + if [[ $response != 2* ]]; then + log_error "There was an issue loading infrastructure index template settings into OpenSearch [$response]" + exit 1 + else + log_info "Infrastructure index template settings loaded into OpenSearch [$response]" + fi fi - # METALOGGING: Create index management policy object & link policy to index template # ...index management policy automates the deletion of indexes after the specified time OPS_LOG_RETENTION_PERIOD="${OPS_LOG_RETENTION_PERIOD:-1}" set_retention_period viya_ops_idxmgmt_policy OPS_LOG_RETENTION_PERIOD -add_ism_template "viya_ops_idxmgmt_policy" "viya_ops-*" 50 +add_ism_template "viya_ops_idxmgmt_policy" "viya_ops-*" 50 # Load template -response=$(curl -s -o /dev/null -w "%{http_code}" -XPUT "$es_api_url/_template/viya-ops-template" -H 'Content-Type: application/json' -d @logging/opensearch/set_index_template_settings_ops.json --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure) +response=$(curl -s -o /dev/null -w "%{http_code}" -XPUT "$es_api_url/_template/viya-ops-template" -H 'Content-Type: application/json' -d @logging/opensearch/set_index_template_settings_ops.json --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) # request returns: {"acknowledged":true} if [[ $response != 2* ]]; then - log_error "There was an issue loading monitoring index template settings into OpenSearch [$response]" - exit 1 + log_error "There was an issue loading monitoring index template settings into OpenSearch [$response]" + exit 1 else - log_debug "Monitoring index template template settings loaded into OpenSearch [$response]" + log_debug "Monitoring index template template settings loaded into OpenSearch [$response]" fi echo "" @@ -236,46 +241,44 @@ LOGGING_DRIVER=true ./logging/bin/security_create_rbac.sh _all_ _all_ 
LOG_CREATE_LOGADM_USER=${LOG_CREATE_LOGADM_USER:-true} if [ "$LOG_CREATE_LOGADM_USER" == "true" ]; then - if user_exists logadm; then - log_warn "A user 'logadm' already exists; leaving that user as-is. Review its definition in OpenSearch Dashboards and update it, or create another user, as needed." - else - log_debug "Creating the 'logadm' user" - - LOG_LOGADM_PASSWD=${LOG_LOGADM_PASSWD:-$ES_ADMIN_PASSWD} - if [ -z "$LOG_LOGADM_PASSWD" ]; then - log_debug "Creating a random password for the 'logadm' user" - LOG_LOGADM_PASSWD="$(randomPassword)" - add_notice "" - add_notice "**The OpenSearch 'logadm' Account**" - add_notice "Generated 'logadm' password: $LOG_LOGADM_PASSWD" - fi - - #create the user - LOGGING_DRIVER=true ./logging/bin/user.sh CREATE -ns _all_ -t _all_ -u logadm -p $LOG_LOGADM_PASSWD - fi + if user_exists logadm; then + log_warn "A user 'logadm' already exists; leaving that user as-is. Review its definition in OpenSearch Dashboards and update it, or create another user, as needed." 
+ else + log_debug "Creating the 'logadm' user" + + LOG_LOGADM_PASSWD=${LOG_LOGADM_PASSWD:-$ES_ADMIN_PASSWD} + if [ -z "$LOG_LOGADM_PASSWD" ]; then + log_debug "Creating a random password for the 'logadm' user" + LOG_LOGADM_PASSWD="$(randomPassword)" + add_notice "" + add_notice "**The OpenSearch 'logadm' Account**" + add_notice "Generated 'logadm' password: $LOG_LOGADM_PASSWD" + fi + + #create the user + LOGGING_DRIVER=true ./logging/bin/user.sh CREATE -ns _all_ -t _all_ -u logadm -p "$LOG_LOGADM_PASSWD" + fi else - log_debug "Skipping creation of 'logadm' user because LOG_CREATE_LOGADM_USER not 'true' [$LOG_CREATE_LOGADM_USER]" + log_debug "Skipping creation of 'logadm' user because LOG_CREATE_LOGADM_USER not 'true' [$LOG_CREATE_LOGADM_USER]" fi #Initialize OSD Reporting Plugin indices INIT_OSD_RPT_IDX=${INIT_OSD_RPT_IDX:-true} if [ "$INIT_OSD_RPT_IDX" == "true" ]; then - log_debug "Initializing OpenSearch Dashboards Reporting plugin indices" - response=$(curl -s -o /dev/null -w "%{http_code}" -XGET "$es_api_url/_plugins/_reports/instances" --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure) - log_debug "OSD_RPT_IDX (instances) Response [$response]" - response=$(curl -s -o /dev/null -w "%{http_code}" -XGET "$es_api_url/_plugins/_reports/definitions" --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure) - log_debug "OSD_RPT_IDX (definitions) Response [$response]" + log_debug "Initializing OpenSearch Dashboards Reporting plugin indices" + response=$(curl -s -o /dev/null -w "%{http_code}" -XGET "$es_api_url/_plugins/_reports/instances" --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) + log_debug "OSD_RPT_IDX (instances) Response [$response]" + response=$(curl -s -o /dev/null -w "%{http_code}" -XGET "$es_api_url/_plugins/_reports/definitions" --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) + log_debug "OSD_RPT_IDX (definitions) Response [$response]" fi - LOGGING_DRIVER=${LOGGING_DRIVER:-false} if [ "$LOGGING_DRIVER" != "true" ]; then - echo "" - 
display_notices - echo "" + echo "" + display_notices + echo "" fi - log_info "Content has been loaded into OpenSearch" log_debug "Script [$this_script] has completed [$(date)]" diff --git a/logging/bin/deploy_openshift_prereqs.sh b/logging/bin/deploy_openshift_prereqs.sh index 6e338781..f94a3298 100755 --- a/logging/bin/deploy_openshift_prereqs.sh +++ b/logging/bin/deploy_openshift_prereqs.sh @@ -3,10 +3,10 @@ # Copyright © 2021, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -cd "$(dirname $BASH_SOURCE)/../.." +cd "$(dirname "$BASH_SOURCE")/../.." || exit 1 source logging/bin/common.sh -this_script=`basename "$0"` +this_script=$(basename "$0") log_debug "Script [$this_script] has started [$(date)]" @@ -14,26 +14,25 @@ log_debug "Script [$this_script] has started [$(date)]" OPENSHIFT_PREREQS_ENABLE=${OPENSHIFT_PREREQS_ENABLE:-true} if [ "$OPENSHIFT_PREREQS_ENABLE" != "true" ]; then - log_info "Environment variable [OPENSHIFT_PREREQS_ENABLE] is not set to 'true'; exiting WITHOUT deploying OpenShift Prerequisites" - exit + log_info "Environment variable [OPENSHIFT_PREREQS_ENABLE] is not set to 'true'; exiting WITHOUT deploying OpenShift Prerequisites" + exit fi - # link OpenSearch serviceAccounts to 'privileged' scc -oc adm policy add-scc-to-user privileged -z v4m-os -n $LOG_NS +oc adm policy add-scc-to-user privileged -z v4m-os -n "$LOG_NS" # create the 'v4m-logging-v2' SCC, if it does not already exist -if oc get scc v4m-logging-v2 2>/dev/null 1>&2; then - log_info "Skipping scc creation; using existing scc [v4m-logging-v2]" +if oc get scc v4m-logging-v2 > /dev/null 2>&1; then + log_info "Skipping scc creation; using existing scc [v4m-logging-v2]" else - oc create -f logging/openshift/fb_v4m-logging-v2_scc.yaml + oc create -f logging/openshift/fb_v4m-logging-v2_scc.yaml fi # create the 'v4m-k8sevents' SCC, if it does not already exist -if oc get scc v4m-k8sevents 2>/dev/null 1>&2; then - log_info "Skipping scc 
creation; using existing scc [v4m-k8sevents]" +if oc get scc v4m-k8sevents > /dev/null 2>&1; then + log_info "Skipping scc creation; using existing scc [v4m-k8sevents]" else - oc create -f logging/openshift/fb_v4m-k8sevents_scc.yaml + oc create -f logging/openshift/fb_v4m-k8sevents_scc.yaml fi log_info "OpenShift Prerequisites have been deployed." diff --git a/logging/bin/deploy_osd.sh b/logging/bin/deploy_osd.sh index 941c1111..4c78b345 100755 --- a/logging/bin/deploy_osd.sh +++ b/logging/bin/deploy_osd.sh @@ -3,22 +3,22 @@ # Copyright © 2022, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -cd "$(dirname $BASH_SOURCE)/../.." +cd "$(dirname "$BASH_SOURCE")/../.." || exit 1 source logging/bin/common.sh source logging/bin/secrets-include.sh source bin/tls-include.sh source bin/autogenerate-include.sh source logging/bin/apiaccess-include.sh -this_script=`basename "$0"` +this_script=$(basename "$0") log_debug "Script [$this_script] has started [$(date)]" OPENSEARCHDASH_ENABLE=${OPENSEARCHDASH_ENABLE:-true} if [ "$OPENSEARCHDASH_ENABLE" != "true" ]; then - log_verbose "Environment variable [OPENSEARCHDASH_ENABLE] is not set to 'true'; exiting WITHOUT deploying OpenSearch Dashboards" - exit 0 + log_verbose "Environment variable [OPENSEARCHDASH_ENABLE] is not set to 'true'; exiting WITHOUT deploying OpenSearch Dashboards" + exit 0 fi set -e @@ -28,145 +28,142 @@ set -e # #Generate yaml file with all container-related keys -generateImageKeysFile "$OSD_FULL_IMAGE" "logging/opensearch/osd_container_image.template" - +generateImageKeysFile "$OSD_FULL_IMAGE" "logging/opensearch/osd_container_image.template" # Confirm namespace exists -if [ "$(kubectl get ns $LOG_NS -o name 2>/dev/null)" == "" ]; then - log_error "Namespace [$LOG_NS] does NOT exist." - exit 1 +if [ -z "$(kubectl get ns "$LOG_NS" -o name 2> /dev/null)" ]; then + log_error "Namespace [$LOG_NS] does NOT exist." 
+ exit 1 fi # get credentials export ES_KIBANASERVER_PASSWD=${ES_KIBANASERVER_PASSWD} # Create secrets containing internal user credentials -create_user_secret internal-user-kibanaserver kibanaserver "$ES_KIBANASERVER_PASSWD" managed-by=v4m-es-script +create_user_secret internal-user-kibanaserver kibanaserver "$ES_KIBANASERVER_PASSWD" managed-by=v4m-es-script # Verify cert generator is available (if necessary) -if verify_cert_generator $LOG_NS kibana; then - log_debug "cert generator check OK [$cert_generator_ok]" +if verify_cert_generator "$LOG_NS" kibana; then + log_debug "cert generator check OK [$cert_generator_ok]" else - log_error "A required TLS cert does not exist and the expected certificate generator mechanism [$cert_generator] is not available to create the missing cert" - exit 1 + log_error "A required TLS cert does not exist and the expected certificate generator mechanism [$cert_generator] is not available to create the missing cert" + exit 1 fi # Create/Get necessary TLS certs -create_tls_certs $LOG_NS logging kibana +create_tls_certs "$LOG_NS" logging kibana AUTOGENERATE_INGRESS="${AUTOGENERATE_INGRESS:-false}" OSD_INGRESS_ENABLE="${OSD_INGRESS_ENABLE:-true}" if [ "$AUTOGENERATE_INGRESS" == "true" ] && [ "$OSD_INGRESS_ENABLE" == "true" ]; then - autogeneratedYAMLFile="$TMP_DIR/autogenerate-osd.yaml" + autogeneratedYAMLFile="$TMP_DIR/autogenerate-osd.yaml" + + if [ ! -f "$autogeneratedYAMLFile" ]; then + log_debug "Creating file [$autogeneratedYAMLFile]" + touch "$autogeneratedYAMLFile" + else + log_debug "File [$autogeneratedYAMLFile] already exists" + fi - if [ ! 
-f "$autogeneratedYAMLFile" ]; then - log_debug "Creating file [$autogeneratedYAMLFile]" - touch "$autogeneratedYAMLFile" - else - log_debug "File [$autogeneratedYAMLFile] already exists" - fi + osdIngressCert="${OSD_INGRESS_CERT}" + osdIngressKey="${OSD_INGRESS_KEY}" - osdIngressCert="${OSD_INGRESS_CERT}" - osdIngressKey="${OSD_INGRESS_KEY}" - - create_ingress_certs "$LOG_NS" kibana-ingress-tls-secret "$osdIngressCert" "$osdIngressKey" + create_ingress_certs "$LOG_NS" kibana-ingress-tls-secret "$osdIngressCert" "$osdIngressKey" - ROUTING="${ROUTING:-host}" + ROUTING="${ROUTING:-host}" - ## tested with sample version: 0.2.1 - ingressSampleFile="samples/ingress/${ROUTING}-based-ingress/logging/user-values-osd.yaml" + ## tested with sample version: 0.2.1 + ingressSampleFile="samples/ingress/${ROUTING}-based-ingress/logging/user-values-osd.yaml" - #intialized the yaml file w/appropriate ingress sample - yq -i eval-all '. as $item ireduce ({}; . * $item )' "$autogeneratedYAMLFile" "$ingressSampleFile" + #intialized the yaml file w/appropriate ingress sample + # shellcheck disable=SC2016 + yq -i eval-all '. as $item ireduce ({}; . 
* $item )' "$autogeneratedYAMLFile" "$ingressSampleFile" - ###OSD_INGRESS_ENABLE="${OSD_INGRESS_ENABLE:-true}" - OSD_FQDN="${OSD_FQDN}" - OSD_PATH="${OSD_PATH:-dashboards}" - if [ -z "$OSD_FQDN" ]; then - if [ "$ROUTING" == "host" ]; then - OSD_FQDN="$OSD_PATH.$BASE_DOMAIN" - else - OSD_FQDN="$BASE_DOMAIN" - fi - fi + OSD_PATH="${OSD_PATH:-dashboards}" + if [ -z "$OSD_FQDN" ]; then + if [ "$ROUTING" == "host" ]; then + OSD_FQDN="$OSD_PATH.$BASE_DOMAIN" + else + OSD_FQDN="$BASE_DOMAIN" + fi + fi - log_debug "OSD_INGRESS_ENABLE [$OSD_INGRESS_ENABLE] OSD_FQDN [$OSD_FQDN] OSD_PATH [$OSD_PATH]" + log_debug "OSD_INGRESS_ENABLE [$OSD_INGRESS_ENABLE] OSD_FQDN [$OSD_FQDN] OSD_PATH [$OSD_PATH]" - export OSD_INGRESS_ENABLE OSD_FQDN OSD_PATH - - yq -i '.ingress.enabled=env(OSD_INGRESS_ENABLE)' $autogeneratedYAMLFile - if [ "$ROUTING" == "host" ]; then - yq -i '.ingress.hosts.[0].host=strenv(OSD_FQDN)' $autogeneratedYAMLFile - yq -i '.ingress.tls.[0].hosts.[0]=env(OSD_FQDN)' $autogeneratedYAMLFile - else + export OSD_INGRESS_ENABLE OSD_FQDN OSD_PATH - export slashpath="/$OSD_PATH" + yq -i '.ingress.enabled=env(OSD_INGRESS_ENABLE)' "$autogeneratedYAMLFile" + if [ "$ROUTING" == "host" ]; then + yq -i '.ingress.hosts.[0].host=strenv(OSD_FQDN)' "$autogeneratedYAMLFile" + yq -i '.ingress.tls.[0].hosts.[0]=env(OSD_FQDN)' "$autogeneratedYAMLFile" + else - yq -i '(.extraEnvs.[] | select(has("name")) | select(.name == "SERVER_BASEPATH")).value=env(slashpath)' $autogeneratedYAMLFile + export slashpath="/$OSD_PATH" - yq -i '.ingress.hosts.[0].host=env(OSD_FQDN)' $autogeneratedYAMLFile - yq -i '.ingress.hosts.[0].paths.[0].path=env(slashpath)' $autogeneratedYAMLFile - yq -i '.ingress.tls.[0].hosts.[0]=env(OSD_FQDN)' $autogeneratedYAMLFile - yq -i '.ingress.annotations["nginx.ingress.kubernetes.io/rewrite-target"]=env(slashpath)' $autogeneratedYAMLFile + yq -i '(.extraEnvs.[] | select(has("name")) | select(.name == "SERVER_BASEPATH")).value=env(slashpath)' "$autogeneratedYAMLFile" - # Need 
to use printf to preserve newlines - printf -v snippet "rewrite (?i)/$OSD_PATH/(.*) /\$1 break;\nrewrite (?i)/${OSD_PATH}$ / break;" ; - snippet="$snippet" yq -i '.ingress.annotations["nginx.ingress.kubernetes.io/configuration-snippet"]=strenv(snippet)' $autogeneratedYAMLFile + yq -i '.ingress.hosts.[0].host=env(OSD_FQDN)' "$autogeneratedYAMLFile" + yq -i '.ingress.hosts.[0].paths.[0].path=env(slashpath)' "$autogeneratedYAMLFile" + yq -i '.ingress.tls.[0].hosts.[0]=env(OSD_FQDN)' "$autogeneratedYAMLFile" + yq -i '.ingress.annotations["nginx.ingress.kubernetes.io/rewrite-target"]=env(slashpath)' "$autogeneratedYAMLFile" - unset slashpath - fi + # Need to use printf to preserve newlines + printf -v snippet "rewrite (?i)/%s/(.*) /\$1 break;\nrewrite (?i)/%s$ / break;" "$OSD_PATH" "$OSD_PATH" + snippet="$snippet" yq -i '.ingress.annotations["nginx.ingress.kubernetes.io/configuration-snippet"]=strenv(snippet)' "$autogeneratedYAMLFile" + + unset slashpath + fi else - log_debug "Autogeneration of ingresss NOT enabled" + log_debug "Autogeneration of ingresss NOT enabled" fi # enable debug on Helm via env var export HELM_DEBUG="${HELM_DEBUG:-false}" if [ "$HELM_DEBUG" == "true" ]; then - helmDebug="--debug" + helmDebug="--debug" fi -helmRepoAdd opensearch https://opensearch-project.github.io/helm-charts +helmRepoAdd opensearch https://opensearch-project.github.io/helm-charts KB_KNOWN_NODEPORT_ENABLE=${KB_KNOWN_NODEPORT_ENABLE:-false} if [ "$KB_KNOWN_NODEPORT_ENABLE" == "true" ]; then - KIBANA_PORT=31033 - log_verbose "Setting OpenSearch Dashboards service NodePort to $KIBANA_PORT" - nodeport_yaml=logging/opensearch/osd_helm_values_nodeport.yaml + KIBANA_PORT=31033 + log_verbose "Setting OpenSearch Dashboards service NodePort to $KIBANA_PORT" + nodeport_yaml=logging/opensearch/osd_helm_values_nodeport.yaml else - nodeport_yaml=$TMP_DIR/empty.yaml - log_debug "OpenSearch Dashboards service NodePort NOT changed to 'known' port because KB_KNOWN_NODEPORT_ENABLE set to 
[$KB_KNOWN_NODEPORT_ENABLE]." + nodeport_yaml=$TMP_DIR/empty.yaml + log_debug "OpenSearch Dashboards service NodePort NOT changed to 'known' port because KB_KNOWN_NODEPORT_ENABLE set to [$KB_KNOWN_NODEPORT_ENABLE]." fi - # OpenSearch Dashboards user customizations OSD_USER_YAML="${OSD_USER_YAML:-$USER_DIR/logging/user-values-osd.yaml}" if [ ! -f "$OSD_USER_YAML" ]; then - log_debug "[$OSD_USER_YAML] not found. Using $TMP_DIR/empty.yaml" - OSD_USER_YAML=$TMP_DIR/empty.yaml + log_debug "[$OSD_USER_YAML] not found. Using $TMP_DIR/empty.yaml" + OSD_USER_YAML=$TMP_DIR/empty.yaml fi # Require TLS into OpenSearch Dashboards (nee Kibana)? OSD_TLS_ENABLE=${OSD_TLS_ENABLE:-$TLS_ENABLE} if [ -z "$OSD_TLS_ENABLE" ]; then - #set to 'true' if still not set - OSD_TLS_ENABLE="true" + #set to 'true' if still not set + OSD_TLS_ENABLE="true" fi #(Re)Create secret containing OSD TLS Setting -kubectl -n $LOG_NS delete secret v4m-osd-tls-enabled --ignore-not-found -kubectl -n $LOG_NS create secret generic v4m-osd-tls-enabled --from-literal enable_tls="$OSD_TLS_ENABLE" +kubectl -n "$LOG_NS" delete secret v4m-osd-tls-enabled --ignore-not-found +kubectl -n "$LOG_NS" create secret generic v4m-osd-tls-enabled --from-literal enable_tls="$OSD_TLS_ENABLE" # OpenSearch Dashboards log_info "Deploying OpenSearch Dashboards" # Remove pre-OSD 2.19.0 version due to label changes -if [ "$(kubectl -n $LOG_NS get deployment v4m-osd -o jsonpath={.spec.template.metadata.labels} 2>/dev/null)" == '{"app":"opensearch-dashboards","release":"v4m-osd"}' ]; then - log_debug "An earlier version of OpenSearch Dashboards (>2.19) was found running and will be removed" - kubectl -n $LOG_NS delete deployment v4m-osd +if [ "$(kubectl -n "$LOG_NS" get deployment v4m-osd -o jsonpath="{.spec.template.metadata.labels}" 2> /dev/null)" == '{"app":"opensearch-dashboards","release":"v4m-osd"}' ]; then + log_debug "An earlier version of OpenSearch Dashboards (<2.19) was found running and will be removed" + kubectl -n "$LOG_NS" 
delete deployment v4m-osd fi # Enable workload node placement? @@ -174,11 +171,11 @@ LOG_NODE_PLACEMENT_ENABLE=${LOG_NODE_PLACEMENT_ENABLE:-${NODE_PLACEMENT_ENABLE:- # Optional workload node placement support if [ "$LOG_NODE_PLACEMENT_ENABLE" == "true" ]; then - log_verbose "Enabling OpenSearch Dashboards for workload node placement" - wnpValuesFile="logging/node-placement/values-osd-wnp.yaml" + log_verbose "Enabling OpenSearch Dashboards for workload node placement" + wnpValuesFile="logging/node-placement/values-osd-wnp.yaml" else - log_debug "Workload node placement support is disabled for OpenSearch Dashboards" - wnpValuesFile="$TMP_DIR/empty.yaml" + log_debug "Workload node placement support is disabled for OpenSearch Dashboards" + wnpValuesFile="$TMP_DIR/empty.yaml" fi OSD_PATH_INGRESS_YAML=$TMP_DIR/empty.yaml @@ -186,7 +183,6 @@ if [ "$OPENSHIFT_CLUSTER:$OPENSHIFT_PATH_ROUTES" == "true:true" ]; then OSD_PATH_INGRESS_YAML=logging/openshift/values-osd-path-route-openshift.yaml fi - OPENSHIFT_SPECIFIC_YAML=$TMP_DIR/empty.yaml if [ "$OPENSHIFT_CLUSTER" == "true" ]; then OPENSHIFT_SPECIFIC_YAML=logging/openshift/values-osd-openshift.yaml @@ -194,22 +190,21 @@ fi # YAML file container auto-generated ingress definitions (or not) if [ ! -f "$autogeneratedYAMLFile" ]; then - log_debug "[$autogeneratedYAMLFile] not found. Using $TMP_DIR/empty.yaml" - autogeneratedYAMLFile="$TMP_DIR/empty.yaml" + log_debug "[$autogeneratedYAMLFile] not found. 
Using $TMP_DIR/empty.yaml" + autogeneratedYAMLFile="$TMP_DIR/empty.yaml" fi - # Get Helm Chart Name log_debug "OpenSearch Dashboards Helm Chart: repo [$OSD_HELM_CHART_REPO] name [$OSD_HELM_CHART_NAME] version [$OSD_HELM_CHART_VERSION]" -chart2install="$(get_helmchart_reference $OSD_HELM_CHART_REPO $OSD_HELM_CHART_NAME $OSD_HELM_CHART_VERSION)" -versionstring="$(get_helm_versionstring $OSD_HELM_CHART_VERSION)" +chart2install="$(get_helmchart_reference "$OSD_HELM_CHART_REPO" "$OSD_HELM_CHART_NAME" "$OSD_HELM_CHART_VERSION")" +versionstring="$(get_helm_versionstring "$OSD_HELM_CHART_VERSION")" log_debug "Installing Helm chart from artifact [$chart2install]" -# Deploy Elasticsearch via Helm chart +# shellcheck disable=SC2086 helm $helmDebug upgrade --install v4m-osd \ $versionstring \ - --namespace $LOG_NS \ + --namespace "$LOG_NS" \ --values "$imageKeysFile" \ --values logging/opensearch/osd_helm_values.yaml \ --values "$wnpValuesFile" \ @@ -219,16 +214,15 @@ helm $helmDebug upgrade --install v4m-osd \ --values "$OPENSHIFT_SPECIFIC_YAML" \ --values "$OSD_PATH_INGRESS_YAML" \ --set fullnameOverride=v4m-osd \ - $chart2install + "$chart2install" log_info "OpenSearch Dashboards has been deployed" - #Container Security: Disable serviceAccount Token Automounting if [ "$OPENSHIFT_CLUSTER" == "true" ]; then - disable_sa_token_automount $LOG_NS v4m-os + disable_sa_token_automount "$LOG_NS" v4m-os else - disable_sa_token_automount $LOG_NS v4m-osd-dashboards + disable_sa_token_automount "$LOG_NS" v4m-osd-dashboards fi log_debug "Script [$this_script] has completed [$(date)]" diff --git a/logging/bin/deploy_osd_content.sh b/logging/bin/deploy_osd_content.sh index b16aee0b..17b541fb 100755 --- a/logging/bin/deploy_osd_content.sh +++ b/logging/bin/deploy_osd_content.sh @@ -3,83 +3,83 @@ # Copyright © 2020, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -cd "$(dirname $BASH_SOURCE)/../.." +cd "$(dirname "$BASH_SOURCE")/../.." 
|| exit 1 + source logging/bin/common.sh source logging/bin/secrets-include.sh source bin/service-url-include.sh source logging/bin/apiaccess-include.sh source logging/bin/rbac-include.sh -this_script=`basename "$0"` +this_script=$(basename "$0") log_debug "Script [$this_script] has started [$(date)]" KIBANA_CONTENT_DEPLOY=${KIBANA_CONTENT_DEPLOY:-${ELASTICSEARCH_ENABLE:-true}} if [ "$KIBANA_CONTENT_DEPLOY" != "true" ]; then - log_verbose "Environment variable [KIBANA_CONTENT_DEPLOY] is not set to 'true'; exiting WITHOUT deploying content into OpenSearch Dashboards" - exit 0 + log_verbose "Environment variable [KIBANA_CONTENT_DEPLOY] is not set to 'true'; exiting WITHOUT deploying content into OpenSearch Dashboards" + exit 0 fi -# temp file used to capture command output -tmpfile=$TMP_DIR/output.txt - # Confirm namespace exists -if [ "$(kubectl get ns $LOG_NS -o name 2>/dev/null)" == "" ]; then - log_error "Namespace [$LOG_NS] does NOT exist." - exit 1 +if [ -z "$(kubectl get ns "$LOG_NS" -o name 2> /dev/null)" ]; then + log_error "Namespace [$LOG_NS] does NOT exist." + exit 1 fi # get credentials get_credentials_from_secret admin rc=$? 
-if [ "$rc" != "0" ] ;then log_debug "RC=$rc"; exit $rc;fi +if [ "$rc" != "0" ]; then + log_debug "RC=$rc" + exit $rc +fi set -e log_info "Configuring OpenSearch Dashboards...this may take a few minutes" - # wait for pod to show as "running" and "ready" log_info "Waiting for OpenSearch Dashboards pods to be ready ($(date) - timeout 10m)" -osdlabels="$(kubectl -n $LOG_NS get deployment v4m-osd -o=jsonpath='{.spec.selector.matchLabels}'| tr -d '{}"' | tr : '=')" +osdlabels="$(kubectl -n "$LOG_NS" get deployment v4m-osd -o=jsonpath='{.spec.selector.matchLabels}' | tr -d '{}"' | tr : '=')" -kubectl -n $LOG_NS wait pods --selector "$osdlabels" --for condition=Ready --timeout=10m +kubectl -n "$LOG_NS" wait pods --selector "$osdlabels" --for condition=Ready --timeout=10m -set +e # disable exit on error +set +e # disable exit on error # Need to wait 2-3 minutes for OSD to come up and # and be ready to accept the curl commands below # Confirm OSD is ready log_info "Waiting (up to more 8 minutes) for OpenSearch Dashboards API endpoint to be ready" -for pause in 30 30 60 30 30 30 30 30 30 60 60 60 -do - - get_kb_api_url - response=$(curl -s -o /dev/null -w "%{http_code}" -XGET "${kb_api_url}/api/status" --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure) - # returns 503 (and outputs "Kibana server is not ready yet") when Kibana isn't ready yet - # TO DO: check for 503 specifically? - rc=$? - if [[ $response != 2* ]]; then - log_debug "The OpenSearch Dashboards REST endpoint does not appear to be quite ready [$response/$rc]; sleeping for [$pause] more seconds before checking again." 
- stop_kb_portforwarding - sleep ${pause} - else - log_verbose "The OpenSearch Dashboards REST endpoint appears to be ready...continuing" - kibanaready="TRUE" - break - fi +for pause in 30 30 60 30 30 30 30 30 30 60 60 60; do + + get_kb_api_url + response=$(curl -s -o /dev/null -w "%{http_code}" -XGET "${kb_api_url}/api/status" --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) + + # returns 503 (and outputs "Kibana server is not ready yet") when Kibana isn't ready yet + # TO DO: check for 503 specifically? + rc=$? + if [[ $response != 2* ]]; then + log_debug "The OpenSearch Dashboards REST endpoint does not appear to be quite ready [$response/$rc]; sleeping for [$pause] more seconds before checking again." + stop_kb_portforwarding + sleep ${pause} + else + log_verbose "The OpenSearch Dashboards REST endpoint appears to be ready...continuing" + kibanaready="TRUE" + break + fi done set -e if [ "$kibanaready" != "TRUE" ]; then - log_error "The OpenSearch Dashboards REST endpoint has NOT become accessible in the expected time; exiting." - log_error "Review the OpenSearch Dashboards pod's events and log to identify the issue and resolve it before trying again." - exit 1 + log_error "The OpenSearch Dashboards REST endpoint has NOT become accessible in the expected time; exiting." + log_error "Review the OpenSearch Dashboards pod's events and log to identify the issue and resolve it before trying again." + exit 1 fi -set +e # disable exit on error +set +e # disable exit on error # get Security API URL get_sec_api_url @@ -88,21 +88,21 @@ get_sec_api_url # Should only be true during UIP scenario b/c our updated securityconfig processing # is bypassed (to prevent clobbering post-deployment changes made via OSD). if ! kibana_tenant_exists "cluster_admins"; then - create_kibana_tenant "cluster_admins" "Tenant space for Cluster Administrators" - rc=$? 
- if [ "$rc" != "0" ]; then - log_error "Problems were encountered while attempting to create tenant space [cluster_admins]." - exit 1 - fi + create_kibana_tenant "cluster_admins" "Tenant space for Cluster Administrators" + rc=$? + if [ "$rc" != "0" ]; then + log_error "Problems were encountered while attempting to create tenant space [cluster_admins]." + exit 1 + fi else - log_debug "The OpenSearch Dashboards tenant space [cluster_admins] exists." + log_debug "The OpenSearch Dashboards tenant space [cluster_admins] exists." fi # Import OSD Searches, Visualizations and Dashboard Objects using curl -./logging/bin/import_osd_content.sh logging/osd/common cluster_admins -./logging/bin/import_osd_content.sh logging/osd/cluster_admins cluster_admins -./logging/bin/import_osd_content.sh logging/osd/namespace cluster_admins -./logging/bin/import_osd_content.sh logging/osd/tenant cluster_admins +./logging/bin/import_osd_content.sh logging/osd/common cluster_admins +./logging/bin/import_osd_content.sh logging/osd/cluster_admins cluster_admins +./logging/bin/import_osd_content.sh logging/osd/namespace cluster_admins +./logging/bin/import_osd_content.sh logging/osd/tenant cluster_admins log_info "Configuring OpenSearch Dashboards has been completed" diff --git a/logging/bin/import_osd_content.sh b/logging/bin/import_osd_content.sh index 93d50e9e..b245a8e0 100755 --- a/logging/bin/import_osd_content.sh +++ b/logging/bin/import_osd_content.sh @@ -3,113 +3,111 @@ # Copyright © 2022,2021, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -cd "$(dirname $BASH_SOURCE)/../.." +cd "$(dirname "$BASH_SOURCE")/../.." 
|| exit 1 source logging/bin/common.sh -this_script=`basename "$0"` +this_script=$(basename "$0") source logging/bin/secrets-include.sh source logging/bin/apiaccess-include.sh source logging/bin/rbac-include.sh - function import_file { - # Loads a .ndjson file into OpenSearch Dashboards - # - # Returns: 0 - Content loaded successfully - # 1 - Content was not loaded successfully - - local file filename response - file=$1 - - if [ -f "$file" ]; then - # ODFE 1.7.0: successful request returns: {"success":true,"successCount":20} - # ODFE 1.13.x: successful request returns: {"successCount":1,"success":true,"successResults":[...content details...]} - response=$(curl -s -o $TMP_DIR/curl.response -w "%{http_code}" -XPOST "${kb_api_url}/api/saved_objects/_import?overwrite=true" -H "securitytenant: $tenant" -H "$LOG_XSRF_HEADER" --form file=@$file --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure ) - - if [[ $response == 2* ]]; then - if grep -q '"success":true' $TMP_DIR/curl.response ; then - log_verbose "Deployed content from file [$file] - Success! [$response]" - else - log_warn "Unable to deploy content from file [$file]. [$response]" - log_verbose " Response received was: $(cat $TMP_DIR/curl.response)" - #log_message "" # null line since response file may not contain LF - fi - return 0 - else - log_warn "Error encountered while deploying content from file [$file]. 
[$response]" - return 1 - fi - fi + # Loads a .ndjson file into OpenSearch Dashboards + # + # Returns: 0 - Content loaded successfully + # 1 - Content was not loaded successfully + + local file response + file=$1 + + if [ -f "$file" ]; then + # ODFE 1.7.0: successful request returns: {"success":true,"successCount":20} + # ODFE 1.13.x: successful request returns: {"successCount":1,"success":true,"successResults":[...content details...]} + response=$(curl -s -o "$TMP_DIR"/curl.response -w "%{http_code}" -XPOST "${kb_api_url}/api/saved_objects/_import?overwrite=true" -H "securitytenant: $tenant" -H "$LOG_XSRF_HEADER" --form file=@"$file" --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) + + if [[ $response == 2* ]]; then + if grep -q '"success":true' "$TMP_DIR"/curl.response; then + log_verbose "Deployed content from file [$file] - Success! [$response]" + else + log_warn "Unable to deploy content from file [$file]. [$response]" + log_verbose " Response received was: $(cat "$TMP_DIR"/curl.response)" + #log_message "" # null line since response file may not contain LF + fi + return 0 + else + log_warn "Error encountered while deploying content from file [$file]. [$response]" + return 1 + fi + fi } function import_content_batch { - # Loads content from a directory in a single API call - # - # Returns: 0 - No load issues - # 1 - At least one load issue encountered - - local dir rc f tmpfile item_count - dir=$1 - - tmpfile=$TMP_DIR/batched.ndjson - touch $tmpfile - - rc=0 - item_count=0 - for f in $dir/*.ndjson; do - if [ -f "$f" ]; then - log_debug "Adding $f to $tmpfile" - cat $f >>$tmpfile - echo " " >> $tmpfile - ((item_count++)) - fi - done - - if [[ "$item_count" -gt 0 ]]; then - log_debug "$item_count items packed into $tmpfile for loading" - import_file $tmpfile - else - log_debug "No content found in [$dir] to be loaded" - fi - return $? 
+ # Loads content from a directory in a single API call + # + # Returns: 0 - No load issues + # 1 - At least one load issue encountered + + local dir rc f tmpfile item_count + dir=$1 + + tmpfile=$TMP_DIR/batched.ndjson + touch "$tmpfile" + + rc=0 + item_count=0 + for f in "$dir"/*.ndjson; do + if [ -f "$f" ]; then + log_debug "Adding $f to $tmpfile" + cat "$f" >> "$tmpfile" + echo " " >> "$tmpfile" + ((item_count++)) + fi + done + + if [[ $item_count -gt 0 ]]; then + log_debug "$item_count items packed into $tmpfile for loading" + import_file "$tmpfile" + else + log_debug "No content found in [$dir] to be loaded" + fi + return $? } + function import_content { - # Loads content from a directory - # - # Returns: 0 - No load issues - # 1 - At least one load issue encountered - - local dir rc f - dir=$1 - - rc=0 - for f in $dir/*.ndjson; do - if [ -f "$f" ]; then - import_file $f - if [ "$?" != "0" ]; then - rc=1 - fi - fi - done - return $rc + # Loads content from a directory + # + # Returns: 0 - No load issues + # 1 - At least one load issue encountered + + local dir rc f + dir=$1 + + rc=0 + for f in "$dir"/*.ndjson; do + if [ -f "$f" ]; then + if ! import_file "$f"; then + rc=1 + fi + fi + done + return $rc } - # # Process input parms and args # if [ "$#" != "2" ]; then - log_error "Invalid set of arguments" - log_message "" - log_message "Usage: $this_script [CONTENT_LOCATION] [TENANT_SPACE]" - log_message "" - log_message "Loads content from the specified location into the specified tenant space." - log_message "" - log_message " CONTENT_LOCATION - (Required) The location, either a single file or a directory, containing content to be imported. Note: content must be in form of .ndjson files." - log_message " TENANT_SPACE - (Required) The tenant space to which the content should be imported. Note: the tenant space must already exist." 
- log_message "" - exit 1 + log_error "Invalid set of arguments" + log_message "" + log_message "Usage: $this_script [CONTENT_LOCATION] [TENANT_SPACE]" + log_message "" + log_message "Loads content from the specified location into the specified tenant space." + log_message "" + log_message " CONTENT_LOCATION - (Required) The location, either a single file or a directory, containing content to be imported. Note: content must be in form of .ndjson files." + log_message " TENANT_SPACE - (Required) The tenant space to which the content should be imported. Note: the tenant space must already exist." + log_message "" + exit 1 fi #Flag to suppress file/directory not found error @@ -120,82 +118,80 @@ batch_kibana_content="${BATCH_KIBANA_CONTENT:-true}" get_kb_api_url if [ -z "$kb_api_url" ]; then - log_error "Unable to determine OpenSearch Dashboards URL" - exit 1 + log_error "Unable to determine OpenSearch Dashboards URL" + exit 1 else - log_debug "OSD URL: $kb_api_url" + log_debug "OSD URL: $kb_api_url" fi - get_sec_api_url if [ -z "$sec_api_url" ]; then - log_error "Unable to determine URL to security API endpoint" - exit 1 + log_error "Unable to determine URL to security API endpoint" + exit 1 fi if [ -z "$2" ]; then - log_error "The required parameter [TENANT_SPACE] was NOT specified; please specify a OpenSearch Dashboards tenant space." - exit 1 + log_error "The required parameter [TENANT_SPACE] was NOT specified; please specify a OpenSearch Dashboards tenant space." + exit 1 else - tenant=$2 + tenant=$2 fi # Convert tenant to all lower-case -tenant=$(echo "$tenant"| tr '[:upper:]' '[:lower:]') +tenant=$(echo "$tenant" | tr '[:upper:]' '[:lower:]') # get credentials get_credentials_from_secret admin rc=$? 
-if [ "$rc" != "0" ] ;then log_info "RC=$rc"; exit $rc;fi +if [ "$rc" != "0" ]; then + log_info "RC=$rc" + exit $rc +fi -if kibana_tenant_exists $tenant; then - log_debug "Confirmed OpenSearch Dashboards tenant space [$tenant] exists" -elif [ "$tenant" == "global" ];then - log_debug "OpenSearch Dashboards tenant space [global] specified." +if kibana_tenant_exists "$tenant"; then + log_debug "Confirmed OpenSearch Dashboards tenant space [$tenant] exists" +elif [ "$tenant" == "global" ]; then + log_debug "OpenSearch Dashboards tenant space [global] specified." else - log_error "Specified tenant space [$tenant] does not exist. Target OpenSearch Dashboards tenant space must exist." - exit 1 + log_error "Specified tenant space [$tenant] does not exist. Target OpenSearch Dashboards tenant space must exist." + exit 1 fi - # Deploy either the specified .ndjson file or all .ndjson files in the specified directory if [ -f "$1" ]; then - if [[ $1 =~ .+\.ndjson ]]; then - # Deploy single content file - f=$1 - log_info "Importing content from file [$f] to tenant space [$tenant]..." - - import_file $f - import_problems=$? - else - log_error "The specified content file [$1] is not a .ndjson file." - exit 1 - fi -elif [ -d "$1" ]; then + if [[ $1 =~ .+\.ndjson ]]; then + # Deploy single content file + f=$1 + log_info "Importing content from file [$f] to tenant space [$tenant]..." + import_file "$f" + import_problems=$? + else + log_error "The specified content file [$1] is not a .ndjson file." + exit 1 + fi +elif [ -d "$1" ]; then # Deploy specified directory of OSD content log_info "Importing content in [$1] to tenant space [$tenant]..." 
if [ "$batch_kibana_content" != "true" ]; then - log_debug "'BATCH_KIBANA_CONTENT' flag set to 'false'; loading files individually from directory" - import_content $1 + log_debug "'BATCH_KIBANA_CONTENT' flag set to 'false'; loading files individually from directory" + import_content "$1" else - import_content_batch $1 + import_content_batch "$1" fi import_problems=$? elif [ "$ignore_not_found" == "true" ]; then - log_debug "The specified file/directory to import [$1] does not exist or cannot be accessed but --ignore-not-found flag has been set." - exit 0 + log_debug "The specified file/directory to import [$1] does not exist or cannot be accessed but --ignore-not-found flag has been set." + exit 0 else - log_error "The specified file/directory to import [$1] does not exist or cannot be accessed." - exit 1 + log_error "The specified file/directory to import [$1] does not exist or cannot be accessed." + exit 1 fi - if [ "$import_problems" == "0" ]; then - log_info "Imported content into tenant space [$tenant]." + log_info "Imported content into tenant space [$tenant]." else - log_warn "There were one or more issues deploying the requested content to OpenSearch Dashboards. Review the messages above." + log_warn "There were one or more issues deploying the requested content to OpenSearch Dashboards. Review the messages above." 
fi - diff --git a/logging/bin/rbac-include.sh b/logging/bin/rbac-include.sh index 2384c89f..2f287317 100755 --- a/logging/bin/rbac-include.sh +++ b/logging/bin/rbac-include.sh @@ -6,340 +6,328 @@ # This file is not marked as executable as it is intended to be sourced # Current directory must be the root directory of the repo - # # ROLE Functions # function create_role { - # Creates role using provided role template - # - # Returns: 0 - Role created - # 1 - Role NOT created - - local role role_template - - role=$1 - role_template=$2 - - response=$(curl -s -o /dev/null -w "%{http_code}" -XPUT "$sec_api_url/roles/$role" -H 'Content-Type: application/json' -d @${role_template} --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure) - - if [[ $response == 2* ]]; then - log_info "Security role [$role] created. [$response]" - return 0 - else - log_error "There was an issue creating the security role [$role]. [$response]" - log_debug "template contents: /n $(cat $role_template)" - return 1 - fi -} + # Creates role using provided role template + # + # Returns: 0 - Role created + # 1 - Role NOT created + + local role role_template + + role=$1 + role_template=$2 + + response=$(curl -s -o /dev/null -w "%{http_code}" -XPUT "$sec_api_url/roles/$role" -H 'Content-Type: application/json' -d @"${role_template}" --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) + if [[ $response == 2* ]]; then + log_info "Security role [$role] created. [$response]" + return 0 + else + log_error "There was an issue creating the security role [$role]. 
[$response]"
+ log_debug "template contents: \n $(cat "$role_template")"
+ return 1
+ fi
+}

function delete_role {
- # Delete SAS Viya deployment-restricted role
- #
- # Returns: 0 - Role deleted
- # 1 - Role was/could not be deleted
-
- local role response
- role=$1
-
- if role_exists $role; then
- response=$(curl -s -o /dev/null -w "%{http_code}" -XDELETE "$sec_api_url/roles/$role" --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure)
- if [[ $response != 2* ]]; then
- log_error "There was an issue deleting the security role [$role]. [$response]"
- return 1
- else
- log_info "Security role [$role] deleted. [$response]"
- fi
- else
- #role does not exist, nothing to do
- log_debug "Role [$role] does not exist; not able to delete it."
- return 1
- fi
+ # Delete SAS Viya deployment-restricted role
+ #
+ # Returns: 0 - Role deleted
+ # 1 - Role was/could not be deleted
+
+ local role response
+ role=$1
+
+ if role_exists "$role"; then
+ response=$(curl -s -o /dev/null -w "%{http_code}" -XDELETE "$sec_api_url/roles/$role" --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure)
+ if [[ $response != 2* ]]; then
+ log_error "There was an issue deleting the security role [$role]. [$response]"
+ return 1
+ else
+ log_info "Security role [$role] deleted. [$response]"
+ fi
+ else
+ #role does not exist, nothing to do
+ log_debug "Role [$role] does not exist; not able to delete it." 
+ return 1 + fi } function ensure_role_exists { - # Ensures specified role exists; creating it if necessary - # - # Returns: 0 - Role exists or was created - # 1 - Role does NOT exist and/or was NOT created - - local role role_template - - role=$1 - role_template=${2:-null} - - if role_exists $role; then - return 0 - else - if [ -n "$role_template" ]; then - rc=$() - if create_role $role $role_template; then - return 0 - else - return 1 - fi - else - # couldn't create it b/c we didn't have a template - log_debug "No role template provided; did not attempt to create role [$role]" - return 1 - fi - fi + # Ensures specified role exists; creating it if necessary + # + # Returns: 0 - Role exists or was created + # 1 - Role does NOT exist and/or was NOT created + + local role role_template + + role=$1 + role_template=${2:-null} + if role_exists "$role"; then + return 0 + else + if [ -n "$role_template" ]; then + if create_role "$role" "$role_template"; then + return 0 + else + return 1 + fi + else + # couldn't create it b/c we didn't have a template + log_debug "No role template provided; did not attempt to create role [$role]" + return 1 + fi + fi } function role_exists { - #Check if $role role exists - # - # Returns: 0 - Role exists - # 1 - Role does not exist - - local role response - - role=$1 - - response=$(curl -s -o /dev/null -w "%{http_code}" -XGET "$sec_api_url/roles/$role" --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure) - if [[ $response == 2* ]]; then - log_debug "Confirmed [$role] exists." - return 0 - else - log_debug "Role [$role] does not exist." - return 1 - fi -} + #Check if $role role exists + # + # Returns: 0 - Role exists + # 1 - Role does not exist + local role response + + role=$1 + + response=$(curl -s -o /dev/null -w "%{http_code}" -XGET "$sec_api_url/roles/$role" --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) + if [[ $response == 2* ]]; then + log_debug "Confirmed [$role] exists." 
+ return 0 + else + log_debug "Role [$role] does not exist." + return 1 + fi +} # # ROLEMAPPING Functions # function add_rolemapping { - # adds $berole to the rolemappings for $targetrole + # adds $berole to the rolemappings for $targetrole - local targetrole berole json verb response - targetrole=$1 - berole=$2 + local targetrole berole json verb response + targetrole=$1 + berole=$2 - log_debug "Parms passed to add_rolemapping function targetrole=$targetrole berole=$berole" + log_debug "Parms passed to add_rolemapping function targetrole=$targetrole berole=$berole" + # get existing rolemappings for $targetrole + response=$(curl -s -o "$TMP_DIR"/rolemapping.json -w "%{http_code}" -XGET "$sec_api_url/rolesmapping/$targetrole" --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) - # get existing rolemappings for $targetrole - response=$(curl -s -o $TMP_DIR/rolemapping.json -w "%{http_code}" -XGET "$sec_api_url/rolesmapping/$targetrole" --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure) - - if [[ $response == 404 ]]; then - log_debug "Rolemappings for [$targetrole] do not exist; creating rolemappings. [$response]" - - json='{"backend_roles" : ["'"$berole"'"]}' - verb=PUT + if [[ $response == 404 ]]; then + log_debug "Rolemappings for [$targetrole] do not exist; creating rolemappings. [$response]" - elif [[ $response == 2* ]]; then - log_debug "Existing rolemappings for [$targetrole] obtained. [$response]" - log_debug "$(cat $TMP_DIR/rolemapping.json)" + json='{"backend_roles" : ["'"$berole"'"]}' + verb=PUT + elif [[ $response == 2* ]]; then + log_debug "Existing rolemappings for [$targetrole] obtained. [$response]" + log_debug "$(cat "$TMP_DIR"/rolemapping.json)" - if [ "$(grep $berole $TMP_DIR/rolemapping.json)" ]; then - log_debug "A rolemapping between [$targetrole] and back-end role [$berole] already appears to exist; leaving as-is." 
- return 0 - elif [ "$(grep '\"backend_roles\":\[\],' $TMP_DIR/rolemapping.json)" ]; then - log_debug "The role [$targetrole] has no existing rolemappings" - json='[{"op": "add","path": "/backend_roles","value":["'"$berole"'"]}]' - verb=PATCH + if grep -q "$berole" "$TMP_DIR"/rolemapping.json; then + log_debug "A rolemapping between [$targetrole] and back-end role [$berole] already appears to exist; leaving as-is." + return 0 + elif grep -q '\"backend_roles\":\[\],' "$TMP_DIR"/rolemapping.json; then + log_debug "The role [$targetrole] has no existing rolemappings" + json='[{"op": "add","path": "/backend_roles","value":["'"$berole"'"]}]' + verb=PATCH + else + json='[{"op": "add","path": "/backend_roles/-","value":"'"$berole"'"}]' + verb=PATCH + fi else - json='[{"op": "add","path": "/backend_roles/-","value":"'"$berole"'"}]' - verb=PATCH + log_error "There was an issue getting the existing rolemappings for [$targetrole]. [$response]" + return 1 fi - else - log_error "There was an issue getting the existing rolemappings for [$targetrole]. [$response]" - return 1 - fi - - log_debug "JSON data passed to curl [$verb]: $json" - - response=$(curl -s -o /dev/null -w "%{http_code}" -X${verb} "$sec_api_url/rolesmapping/$targetrole" -H 'Content-Type: application/json' -d "$json" --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure) - if [[ $response != 2* ]]; then - log_error "There was an issue creating the rolemapping between [$targetrole] and backend-role(s) ["$berole"]. [$response]" - return 1 - else - log_info "Security rolemapping created between [$targetrole] and backend-role(s) ["$berole"]. 
[$response]" - return 0 - fi + log_debug "JSON data passed to curl [$verb]: $json" + response=$(curl -s -o /dev/null -w "%{http_code}" -X${verb} "$sec_api_url/rolesmapping/$targetrole" -H 'Content-Type: application/json' -d "$json" --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) + if [[ $response != 2* ]]; then + log_error "There was an issue creating the rolemapping between [$targetrole] and backend-role(s) [""$berole""]. [$response]" + return 1 + else + log_info "Security rolemapping created between [$targetrole] and backend-role(s) [""$berole""]. [$response]" + return 0 + fi } - function delete_rolemappings { - # Delete ALL role-mappings for specified role - # - # Returns: 0 - Rolemappings deleted - # 1 - Rolemappings were/could not be deleted - - local role response - role=$1 - - if role_exists $role; then - response=$(curl -s -o /dev/null -w "%{http_code}" -XDELETE "$sec_api_url/rolesmapping/$role" --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure) - if [[ $response == 404 ]]; then - log_info "Rolemappings for [$role] do not exist; nothing to delete. [$response]" - return 0 - elif [[ $response != 2* ]]; then - log_error "There was an issue deleting the rolemappings for [$role]. [$response]" - return 1 - else - log_info "Security rolemappings for [$role] deleted. [$response]" - return 0 - fi - else - #role does not exist, nothing to do - log_debug "Role [$role] does not exist; no rolemappings to delete." - return 1 - fi + # Delete ALL role-mappings for specified role + # + # Returns: 0 - Rolemappings deleted + # 1 - Rolemappings were/could not be deleted + + local role response + role=$1 + + if role_exists "$role"; then + response=$(curl -s -o /dev/null -w "%{http_code}" -XDELETE "$sec_api_url/rolesmapping/$role" --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) + if [[ $response == 404 ]]; then + log_info "Rolemappings for [$role] do not exist; nothing to delete. 
[$response]" + return 0 + elif [[ $response != 2* ]]; then + log_error "There was an issue deleting the rolemappings for [$role]. [$response]" + return 1 + else + log_info "Security rolemappings for [$role] deleted. [$response]" + return 0 + fi + else + #role does not exist, nothing to do + log_debug "Role [$role] does not exist; no rolemappings to delete." + return 1 + fi } - function remove_rolemapping { - # removes $berole2remove from the rolemappings - # for $targetrole (if $targetrole exists) - - # - # Returns: 0 - The rolemappings removed - # 1 - The rolemappings were/could not be removed - - local targetrole regex json beroles newroles response berole2remove - targetrole=$1 - berole2remove=$2 - log_debug "remove_rolemapping targetrole:$targetrole berole2remove:$berole2remove" - - if role_exists $targetrole; then - - # get existing rolemappings for $targetrole - response=$(curl -s -o $TMP_DIR/rolemapping.json -w "%{http_code}" -XGET "$sec_api_url/rolesmapping/$targetrole" --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure) - - if [[ $response == 404 ]]; then - log_debug "Rolemappings for [$targetrole] do not exist; nothing to do. [$response]" - return 0 - elif [[ $response != 2* ]]; then - log_error "There was an issue getting the existing rolemappings for [$targetrole]. [$response]" - return 1 - else - log_debug "Existing rolemappings for [$targetrole] obtained. 
[$response]" - log_debug "$(cat $TMP_DIR/rolemapping.json)" + # removes $berole2remove from the rolemappings + # for $targetrole (if $targetrole exists) - regex='"backend_roles":\[((("[_0-9a-zA-Z\-]+",?)?)+)\]' - json=$(cat $TMP_DIR/rolemapping.json) + # + # Returns: 0 - The rolemappings removed + # 1 - The rolemappings were/could not be removed - if [[ $json =~ $regex ]]; then + local targetrole regex json be_roles newroles response berole2remove + targetrole=$1 + berole2remove=$2 + log_debug "remove_rolemapping targetrole:$targetrole berole2remove:$berole2remove" - be_roles="[${BASH_REMATCH[1]}]" + if role_exists "$targetrole"; then - if [ -z "$be_roles" ]; then - log_debug "No backend roles to patch for [$targetrole]; moving on" - return 0 - else + # get existing rolemappings for $targetrole + response=$(curl -s -o "$TMP_DIR"/rolemapping.json -w "%{http_code}" -XGET "$sec_api_url/rolesmapping/$targetrole" --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) - # ODFE 1.7 {"kibana_user":{"reserved":false,"hidden":false,"backend_roles":["kibanauser","d27885_kibana_users","acme_d27885_kibana_user"],"hosts":[],"users":[],"and_backend_roles":[],"description":"Maps kibanauser to kibana_user"}} - # ODFE 1.13 {"kibana_user":{"hosts":[],"users":[],"reserved":false,"hidden":false,"backend_roles":["kibanauser","d27886_kibana_users","d35396_kibana_users","d35396_acme_kibana_users","d35396A_kibana_users","d35396A_acme_kibana_users"],"and_backend_roles":[]}} - - # Extract and reconstruct backend_roles array from rolemapping json - newroles=$(echo $be_roles | sed "s/\"$berole2remove\"//g;s/,,,/,/g;s/,,/,/g; s/,]/]/g;s/\[,/\[/g") - if [ "$be_roles" == "$newroles" ]; then - log_debug "The backend role [$berole2remove] is not mapped to [$targetrole]; moving on." - return 0 - else + if [[ $response == 404 ]]; then + log_debug "Rolemappings for [$targetrole] do not exist; nothing to do. 
[$response]" + return 0 + elif [[ $response != 2* ]]; then + log_error "There was an issue getting the existing rolemappings for [$targetrole]. [$response]" + return 1 + else + log_debug "Existing rolemappings for [$targetrole] obtained. [$response]" + log_debug "$(cat "$TMP_DIR"/rolemapping.json)" - log_debug "Updated Back-end Role ($targetrole): $newroles" + regex='"backend_roles":\[((("[_0-9a-zA-Z\-]+",?)?)+)\]' + json=$(cat "$TMP_DIR"/rolemapping.json) - # Copy RBAC template - cp logging/opensearch/rbac/backend_rolemapping_delete.json $TMP_DIR/${targetrole}_backend_rolemapping_delete.json + if [[ $json =~ $regex ]]; then - #update json template file w/revised list of backend roles - sed -i'.bak' "s/xxBACKENDROLESxx/$newroles/g" $TMP_DIR/${targetrole}_backend_rolemapping_delete.json # BACKENDROLES + be_roles="[${BASH_REMATCH[1]}]" - # Replace the rolemappings for the $targetrole with the revised list of backend roles - response=$(curl -s -o /dev/null -w "%{http_code}" -XPATCH "$sec_api_url/rolesmapping/$targetrole" -H 'Content-Type: application/json' -d @$TMP_DIR/${targetrole}_backend_rolemapping_delete.json --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure) - if [[ $response != 2* ]]; then - log_error "There was an issue updating the rolesmapping for [$targetrole] to remove link with backend-role [$berole2remove]. [$response]" - return 1 + if [ -z "$be_roles" ]; then + log_debug "No backend roles to patch for [$targetrole]; moving on" + return 0 else - log_info "Security rolemapping deleted between [$targetrole] and backend-role [$berole2remove]. 
[$response]" - return 0 + + # ODFE 1.7 {"kibana_user":{"reserved":false,"hidden":false,"backend_roles":["kibanauser","d27885_kibana_users","acme_d27885_kibana_user"],"hosts":[],"users":[],"and_backend_roles":[],"description":"Maps kibanauser to kibana_user"}} + # ODFE 1.13 {"kibana_user":{"hosts":[],"users":[],"reserved":false,"hidden":false,"backend_roles":["kibanauser","d27886_kibana_users","d35396_kibana_users","d35396_acme_kibana_users","d35396A_kibana_users","d35396A_acme_kibana_users"],"and_backend_roles":[]}} + + # Extract and reconstruct backend_roles array from rolemapping json + newroles=$(echo "$be_roles" | sed "s/\"$berole2remove\"//g;s/,,,/,/g;s/,,/,/g; s/,]/]/g;s/\[,/\[/g") + if [ "$be_roles" == "$newroles" ]; then + log_debug "The backend role [$berole2remove] is not mapped to [$targetrole]; moving on." + return 0 + else + + log_debug "Updated Back-end Role ($targetrole): $newroles" + + # Copy RBAC template + cp logging/opensearch/rbac/backend_rolemapping_delete.json "$TMP_DIR"/"${targetrole}"_backend_rolemapping_delete.json + + #update json template file w/revised list of backend roles + sed -i'.bak' "s/xxBACKENDROLESxx/$newroles/g" "$TMP_DIR"/"${targetrole}"_backend_rolemapping_delete.json # BACKENDROLES + + # Replace the rolemappings for the $targetrole with the revised list of backend roles + response=$(curl -s -o /dev/null -w "%{http_code}" -XPATCH "$sec_api_url/rolesmapping/$targetrole" -H 'Content-Type: application/json' -d @"$TMP_DIR"/"${targetrole}"_backend_rolemapping_delete.json --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) + if [[ $response != 2* ]]; then + log_error "There was an issue updating the rolesmapping for [$targetrole] to remove link with backend-role [$berole2remove]. [$response]" + return 1 + else + log_info "Security rolemapping deleted between [$targetrole] and backend-role [$berole2remove]. 
[$response]" + return 0 + fi + fi fi - fi - fi - fi + fi + fi + else + log_debug "The role [$targetrole] does not exist; doing nothing. [$response]" fi - else - log_debug "The role [$targetrole] does not exist; doing nothing. [$response]" - fi } - # # TENANT Functions # function create_kibana_tenant { - # Creates a Kibana tenant - # - # Returns: 0 - Kibana tenant created - # 1 - Kibana tenant NOT created + # Creates a Kibana tenant + # + # Returns: 0 - Kibana tenant created + # 1 - Kibana tenant NOT created - local tenant description response + local tenant description response - tenant=$1 - description=$2 + tenant=$1 + description=$2 - response=$(curl -s -o /dev/null -w "%{http_code}" -XPUT "$sec_api_url/tenants/$tenant" -H 'Content-Type: application/json' -d '{"description":"'"$description"'"}' --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure) + response=$(curl -s -o /dev/null -w "%{http_code}" -XPUT "$sec_api_url/tenants/$tenant" -H 'Content-Type: application/json' -d '{"description":"'"$description"'"}' --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) - if [[ $response == 2* ]]; then - log_info "OpenSearch Dashboards tenant space [$tenant] created. [$response]" - return 0 - else - log_error "There was an issue creating the OpenSearch Dashboards tenant space [$tenant]. [$response]" - return 1 - fi + if [[ $response == 2* ]]; then + log_info "OpenSearch Dashboards tenant space [$tenant] created. [$response]" + return 0 + else + log_error "There was an issue creating the OpenSearch Dashboards tenant space [$tenant]. 
[$response]" + return 1 + fi } function delete_kibana_tenant { - # Deletes a Kibana tenant - # - # Returns: 0 - Kibana tenant deleted - # 1 - Kibana tenant NOT deleted + # Deletes a Kibana tenant + # + # Returns: 0 - Kibana tenant deleted + # 1 - Kibana tenant NOT deleted - local tenant response + local tenant response - tenant=$1 + tenant=$1 - response=$(curl -s -o /dev/null -w "%{http_code}" -XDELETE "$sec_api_url/tenants/$tenant" --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure) + response=$(curl -s -o /dev/null -w "%{http_code}" -XDELETE "$sec_api_url/tenants/$tenant" --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) - if [[ $response == 2* ]]; then - log_info "OpenSearch Dashboards tenant space [$tenant] deleted. [$response]" - return 0 - else - log_error "There was an issue deleting the OpenSearch Dashboards tenant space [$tenant]. [$response]" - return 1 - fi + if [[ $response == 2* ]]; then + log_info "OpenSearch Dashboards tenant space [$tenant] deleted. [$response]" + return 0 + else + log_error "There was an issue deleting the OpenSearch Dashboards tenant space [$tenant]. [$response]" + return 1 + fi } function kibana_tenant_exists { - # Check if $tenant exists - # - # Returns: 0 - Tenant exists - # 1 - Tenant does not exist - - local tenant response - tenant=$1 - - response=$(curl -s -o /dev/null -w "%{http_code}" -XGET "${sec_api_url}/tenants/$tenant" --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure ) - - if [[ $response == 2* ]]; then - log_debug "Confirmed OpenSearch Dashboards tenant [$tenant] exists. [$response]" - return 0 - else - log_debug "OpenSearch Dashboards tenant [$tenant] does not exist. 
[$response]" - return 1 - fi + # Check if $tenant exists + # + # Returns: 0 - Tenant exists + # 1 - Tenant does not exist + + local tenant response + tenant=$1 + + response=$(curl -s -o /dev/null -w "%{http_code}" -XGET "${sec_api_url}/tenants/$tenant" --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) + + if [[ $response == 2* ]]; then + log_debug "Confirmed OpenSearch Dashboards tenant [$tenant] exists. [$response]" + return 0 + else + log_debug "OpenSearch Dashboards tenant [$tenant] does not exist. [$response]" + return 1 + fi } # @@ -347,47 +335,47 @@ function kibana_tenant_exists { # function user_exists { - # Check if $user exists - # - # Returns: 0 - User exists - # 1 - User does not exist - - local username response - username=$1 - - response=$(curl -s -o /dev/null -w "%{http_code}" -XGET "${sec_api_url}/internalusers/$username" --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure ) - - if [[ $response == 2* ]]; then - log_debug "Confirmed OpenSearch user [$username] exists. [$response]" - return 0 - else - log_debug "OpenSearch user [$username] does not exist. [$response]" - return 1 - fi + # Check if $user exists + # + # Returns: 0 - User exists + # 1 - User does not exist + + local username response + username=$1 + + response=$(curl -s -o /dev/null -w "%{http_code}" -XGET "${sec_api_url}/internalusers/$username" --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) + + if [[ $response == 2* ]]; then + log_debug "Confirmed OpenSearch user [$username] exists. [$response]" + return 0 + else + log_debug "OpenSearch user [$username] does not exist. 
[$response]" + return 1 + fi } function delete_user { - # Deletes $user from internal user - # - # Returns: 0 - User deleted - # 1 - User NOT deleted - - local username response - username=$1 - - if user_exists $username; then - response=$(curl -s -o /dev/null -w "%{http_code}" -XDELETE "${sec_api_url}/internalusers/$username" --user $ES_ADMIN_USER:$ES_ADMIN_PASSWD --insecure ) - - if [[ $response == 2* ]]; then - log_debug "User [$username] deleted. [$response]" - return 0 - else - log_error "There was an issue deleting the user role [$username]. [$response]" - return 1 - fi - else - #username does not exist, nothing to do - log_debug "User [$userename] does not exist; not able to delete it." - return 1 - fi + # Deletes $user from internal user + # + # Returns: 0 - User deleted + # 1 - User NOT deleted + + local username response + username=$1 + + if user_exists "$username"; then + response=$(curl -s -o /dev/null -w "%{http_code}" -XDELETE "${sec_api_url}/internalusers/$username" --user "$ES_ADMIN_USER":"$ES_ADMIN_PASSWD" --insecure) + + if [[ $response == 2* ]]; then + log_debug "User [$username] deleted. [$response]" + return 0 + else + log_error "There was an issue deleting the user role [$username]. [$response]" + return 1 + fi + else + #username does not exist, nothing to do + log_debug "User [$username] does not exist; not able to delete it." + return 1 + fi } diff --git a/logging/bin/remove_esexporter.sh b/logging/bin/remove_esexporter.sh index 0e91c245..8e374f6e 100755 --- a/logging/bin/remove_esexporter.sh +++ b/logging/bin/remove_esexporter.sh @@ -3,17 +3,16 @@ # Copyright © 2020, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -cd "$(dirname $BASH_SOURCE)/../.." +cd "$(dirname "$BASH_SOURCE")/../.." 
|| exit 1 source logging/bin/common.sh -this_script=`basename "$0"` +this_script=$(basename "$0") log_debug "Script [$this_script] has started [$(date)]" log_info "Removing Elasticsearch Exporter [$(date)]" -helm delete -n $LOG_NS es-exporter +helm delete -n "$LOG_NS" es-exporter log_debug "Script [$this_script] has completed [$(date)]" echo "" - diff --git a/logging/bin/remove_fluentbit_azmonitor.sh b/logging/bin/remove_fluentbit_azmonitor.sh index 0fb47257..2da01738 100755 --- a/logging/bin/remove_fluentbit_azmonitor.sh +++ b/logging/bin/remove_fluentbit_azmonitor.sh @@ -3,10 +3,8 @@ # Copyright © 2020, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -cd "$(dirname "$BASH_SOURCE")/../.." || { - echo "Failed to change directory" - exit 1 -} +cd "$(dirname "$BASH_SOURCE")/../.." || exit 1 + source logging/bin/common.sh this_script=$(basename "$0") diff --git a/logging/bin/remove_fluentbit_k8sevents_opensearch.sh b/logging/bin/remove_fluentbit_k8sevents_opensearch.sh index 303055f3..f22befef 100755 --- a/logging/bin/remove_fluentbit_k8sevents_opensearch.sh +++ b/logging/bin/remove_fluentbit_k8sevents_opensearch.sh @@ -3,10 +3,8 @@ # Copyright © 2023, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -cd "$(dirname "$BASH_SOURCE")/../.." || { - echo "Failed to change directory" - exit 1 -} +cd "$(dirname "$BASH_SOURCE")/../.." || exit 1 + source logging/bin/common.sh this_script=$(basename "$0") diff --git a/logging/bin/remove_fluentbit_opensearch.sh b/logging/bin/remove_fluentbit_opensearch.sh index 8eb5afe3..c8701de3 100755 --- a/logging/bin/remove_fluentbit_opensearch.sh +++ b/logging/bin/remove_fluentbit_opensearch.sh @@ -3,10 +3,8 @@ # Copyright © 2020, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -cd "$(dirname "$BASH_SOURCE")/../.." || { - echo "Failed to change directory" - exit 1 -} +cd "$(dirname "$BASH_SOURCE")/../.." 
|| exit 1 + source logging/bin/common.sh this_script=$(basename "$0") diff --git a/logging/bin/remove_logging.sh b/logging/bin/remove_logging.sh index b39b8156..8bf3ed02 100755 --- a/logging/bin/remove_logging.sh +++ b/logging/bin/remove_logging.sh @@ -3,19 +3,18 @@ # Copyright © 2022, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -cd "$(dirname $BASH_SOURCE)/../.." +cd "$(dirname "$BASH_SOURCE")/../.." || exit 1 source logging/bin/common.sh # Confirm NOT on OpenShift if [ "$OPENSHIFT_CLUSTER" == "true" ]; then - if [ "${CHECK_OPENSHIFT_CLUSTER:-true}" == "true" ]; then - log_error "This script should NOT be run on OpenShift clusters" - log_error "Run logging/bin/remove_logging_openshift.sh instead" - exit 1 - fi + if [ "${CHECK_OPENSHIFT_CLUSTER:-true}" == "true" ]; then + log_error "This script should NOT be run on OpenShift clusters" + log_error "Run logging/bin/remove_logging_openshift.sh instead" + exit 1 + fi fi - LOG_DELETE_CONFIGMAPS_ON_REMOVE=${LOG_DELETE_CONFIGMAPS_ON_REMOVE:-true} LOG_DELETE_SECRETS_ON_REMOVE=${LOG_DELETE_SECRETS_ON_REMOVE:-true} LOG_DELETE_PVCS_ON_REMOVE=${LOG_DELETE_PVCS_ON_REMOVE:-false} @@ -23,8 +22,8 @@ LOG_DELETE_NAMESPACE_ON_REMOVE=${LOG_DELETE_NAMESPACE_ON_REMOVE:-false} ##29MAR22: TODO: Remove this section? # Check for existing incompatible helm releases up front -helm2ReleaseCheck odfe-$LOG_NS -helm2ReleaseCheck es-exporter-$LOG_NS +helm2ReleaseCheck odfe-"$LOG_NS" +helm2ReleaseCheck es-exporter-"$LOG_NS" log_notice "Removing logging components from the [$LOG_NS] namespace [$(date)]" @@ -41,18 +40,18 @@ logging/bin/remove_eventrouter.sh logging/bin/remove_fluentbit_k8sevents_opensearch.sh if [ "$LOG_DELETE_PVCS_ON_REMOVE" == "true" ]; then - log_verbose "Removing known logging PVCs..." - kubectl delete pvc --ignore-not-found -n $LOG_NS -l app.kubernetes.io/name=opensearch + log_verbose "Removing known logging PVCs..." 
+ kubectl delete pvc --ignore-not-found -n "$LOG_NS" -l app.kubernetes.io/name=opensearch fi if [ "$LOG_DELETE_SECRETS_ON_REMOVE" == "true" ]; then - log_verbose "Removing known logging secrets..." - kubectl delete secret --ignore-not-found -n $LOG_NS -l managed-by=v4m-es-script + log_verbose "Removing known logging secrets..." + kubectl delete secret --ignore-not-found -n "$LOG_NS" -l managed-by=v4m-es-script fi if [ "$LOG_DELETE_CONFIGMAPS_ON_REMOVE" == "true" ]; then - log_verbose "Removing known logging configmaps..." - kubectl delete configmap --ignore-not-found -n $LOG_NS -l managed-by=v4m-es-script + log_verbose "Removing known logging configmaps..." + kubectl delete configmap --ignore-not-found -n "$LOG_NS" -l managed-by=v4m-es-script fi # Check for and remove any v4m deployments with old naming convention @@ -61,36 +60,35 @@ removeV4MInfo "$LOG_NS" "v4m" removeV4MInfo "$LOG_NS" "v4m-logs" if [ "$LOG_DELETE_NAMESPACE_ON_REMOVE" == "true" ]; then - log_info "Deleting the [$LOG_NS] namespace..." - if kubectl delete namespace $LOG_NS --timeout $KUBE_NAMESPACE_DELETE_TIMEOUT; then - log_info "[$LOG_NS] namespace and logging components successfully removed" - exit 0 - else - log_error "Unable to delete the [$LOG_NS] namespace" - exit 1 - fi + log_info "Deleting the [$LOG_NS] namespace..." + if kubectl delete namespace "$LOG_NS" --timeout "$KUBE_NAMESPACE_DELETE_TIMEOUT"; then + log_info "[$LOG_NS] namespace and logging components successfully removed" + exit 0 + else + log_error "Unable to delete the [$LOG_NS] namespace" + exit 1 + fi fi log_info "Waiting 60 sec for resources to terminate..." 
sleep 60 log_info "Checking contents of the [$LOG_NS] namespace:" -objects=( all pvc secret configmap) +objects=(all pvc secret configmap) empty="true" -for object in "${objects[@]}" -do - out=$(kubectl get -n $LOG_NS $object 2>&1) - if [[ "$out" =~ 'No resources found' ]]; then - : - else - empty="false" - log_warn "Found [$object] resources in the [$LOG_NS] namespace:" - echo "$out" - fi +for object in "${objects[@]}"; do + out=$(kubectl get -n "$LOG_NS" "$object" 2>&1) + if [[ $out =~ 'No resources found' ]]; then + : + else + empty="false" + log_warn "Found [$object] resources in the [$LOG_NS] namespace:" + echo "$out" + fi done if [ "$empty" == "true" ]; then - log_info " The [$LOG_NS] namespace is empty and should be safe to delete." + log_info " The [$LOG_NS] namespace is empty and should be safe to delete." else - log_warn " The [$LOG_NS] namespace is not empty." - log_warn " Examine the resources above before deleting the namespace." + log_warn " The [$LOG_NS] namespace is not empty." + log_warn " Examine the resources above before deleting the namespace." fi diff --git a/logging/bin/remove_logging_fluentbit_azmonitor.sh b/logging/bin/remove_logging_fluentbit_azmonitor.sh deleted file mode 100755 index 6476b231..00000000 --- a/logging/bin/remove_logging_fluentbit_azmonitor.sh +++ /dev/null @@ -1,25 +0,0 @@ -#! /bin/bash - -# Copyright © 2020, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 - -cd "$(dirname $BASH_SOURCE)/../.." -source logging/bin/common.sh - -this_script=`basename "$0"` - -log_debug "Script [$this_script] has started [$(date)]" - -# DEPRECATION NOTICE -log_notice "* This script [remove_logging_fluentbit_azmonitor.sh] is deprecated and will be removed in the future. *" -log_notice "* Moving forward, please use the [remove_fluentbit_azmonitor.sh] script to remove Fluent Bit instead. *" -echo "" - -log_info "Calling the [remove_fluentbit_azmonitor.sh] script..." 
- -# Call replacement script -logging/bin/remove_fluentbit_azmonitor.sh - - -log_debug "Script [$this_script] has completed [$(date)]" -echo "" diff --git a/logging/bin/remove_logging_open.sh b/logging/bin/remove_logging_open.sh deleted file mode 100755 index c5f23b34..00000000 --- a/logging/bin/remove_logging_open.sh +++ /dev/null @@ -1,11 +0,0 @@ -#! /bin/bash - -# Copyright © 2022, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 - -cd "$(dirname $BASH_SOURCE)/../.." -source logging/bin/common.sh - -log_error "This script is obsolete." -log_error "Run logging/bin/remove_logging.sh instead." - diff --git a/logging/bin/remove_logging_openshift.sh b/logging/bin/remove_logging_openshift.sh index 5b65b51a..be80bb2b 100755 --- a/logging/bin/remove_logging_openshift.sh +++ b/logging/bin/remove_logging_openshift.sh @@ -3,23 +3,22 @@ # Copyright © 2021, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -cd "$(dirname $BASH_SOURCE)/../.." +cd "$(dirname "$BASH_SOURCE")/../.." 
|| exit 1 source logging/bin/common.sh -this_script=`basename "$0"` +this_script=$(basename "$0") log_debug "Script [$this_script] has started [$(date)]" # Confirm on OpenShift if [ "$OPENSHIFT_CLUSTER" != "true" ]; then - if [ "${CHECK_OPENSHIFT_CLUSTER:-true}" == "true" ]; then - log_error "This script should only be run on OpenShift clusters" - log_error "Run logging/bin/remove_logging.sh instead" - exit 1 - fi + if [ "${CHECK_OPENSHIFT_CLUSTER:-true}" == "true" ]; then + log_error "This script should only be run on OpenShift clusters" + log_error "Run logging/bin/remove_logging.sh instead" + exit 1 + fi fi - # remove OpenShift-specific content not removed by primary removal script logging/bin/remove_openshift_artifacts.sh logging/bin/remove_openshift_routes.sh diff --git a/logging/bin/remove_opensearch.sh b/logging/bin/remove_opensearch.sh index 703d3952..f5daca7c 100755 --- a/logging/bin/remove_opensearch.sh +++ b/logging/bin/remove_opensearch.sh @@ -3,20 +3,18 @@ # Copyright © 2020, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -cd "$(dirname $BASH_SOURCE)/../.." +cd "$(dirname "$BASH_SOURCE")/../.." || exit 1 source logging/bin/common.sh -this_script=`basename "$0"` +this_script=$(basename "$0") log_debug "Script [$this_script] has started [$(date)]" log_info "Removing OpenSearch [$(date)]" -helm delete -n $LOG_NS opensearch - +helm delete -n "$LOG_NS" opensearch log_verbose "Removing ConfigMaps" -kubectl -n $LOG_NS delete configmap run-securityadmin.sh --ignore-not-found +kubectl -n "$LOG_NS" delete configmap run-securityadmin.sh --ignore-not-found log_debug "Script [$this_script] has completed [$(date)]" echo "" - diff --git a/logging/bin/remove_openshift_artifacts.sh b/logging/bin/remove_openshift_artifacts.sh index cd71fe47..6ba6e371 100755 --- a/logging/bin/remove_openshift_artifacts.sh +++ b/logging/bin/remove_openshift_artifacts.sh @@ -3,10 +3,10 @@ # Copyright © 2021, SAS Institute Inc., Cary, NC, USA. 
All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -cd "$(dirname $BASH_SOURCE)/../.." +cd "$(dirname "$BASH_SOURCE")/../.." || exit 1 source logging/bin/common.sh -this_script=`basename "$0"` +this_script=$(basename "$0") log_debug "Script [$this_script] has started [$(date)]" @@ -14,16 +14,14 @@ log_debug "Script [$this_script] has started [$(date)]" OPENSHIFT_ARTIFACTS_REMOVE=${OPENSHIFT_ARTIFACTS_REMOVE:-true} if [ "$OPENSHIFT_ARTIFACTS_REMOVE" != "true" ]; then - log_info "Environment variable [OPENSHIFT_ARTIFACTS_REMOVE] is not set to 'true'; exiting WITHOUT removing OpenShift Artifacts" - exit + log_info "Environment variable [OPENSHIFT_ARTIFACTS_REMOVE] is not set to 'true'; exiting WITHOUT removing OpenShift Artifacts" + exit fi # remove custom OpenShift SCC -oc delete scc v4mlogging --ignore-not-found +oc delete scc v4mlogging --ignore-not-found oc delete scc v4m-logging-v2 --ignore-not-found -oc delete scc v4m-k8sevents --ignore-not-found - - +oc delete scc v4m-k8sevents --ignore-not-found log_info "OpenShift Prerequisites have been removed." diff --git a/logging/bin/remove_openshift_routes.sh b/logging/bin/remove_openshift_routes.sh index 69f655ea..652cfedd 100755 --- a/logging/bin/remove_openshift_routes.sh +++ b/logging/bin/remove_openshift_routes.sh @@ -3,17 +3,17 @@ # Copyright © 2021, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -cd "$(dirname $BASH_SOURCE)/../.." +cd "$(dirname "$BASH_SOURCE")/../.." 
|| exit 1 source logging/bin/common.sh -this_script=`basename "$0"` +this_script=$(basename "$0") log_debug "Script [$this_script] has started [$(date)]" -oc -n $LOG_NS delete route v4m-es-kibana-svc --ignore-not-found -oc -n $LOG_NS delete route v4m-es-client-service --ignore-not-found -oc -n $LOG_NS delete route v4m-search --ignore-not-found -oc -n $LOG_NS delete route v4m-osd --ignore-not-found +oc -n "$LOG_NS" delete route v4m-es-kibana-svc --ignore-not-found +oc -n "$LOG_NS" delete route v4m-es-client-service --ignore-not-found +oc -n "$LOG_NS" delete route v4m-search --ignore-not-found +oc -n "$LOG_NS" delete route v4m-osd --ignore-not-found log_info "OpenShift Routes have been removed." diff --git a/logging/bin/remove_osd.sh b/logging/bin/remove_osd.sh index 6feb3931..c912b3c7 100755 --- a/logging/bin/remove_osd.sh +++ b/logging/bin/remove_osd.sh @@ -3,18 +3,17 @@ # Copyright © 2022, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -cd "$(dirname $BASH_SOURCE)/../.." +cd "$(dirname "$BASH_SOURCE")/../.." 
|| exit 1 source logging/bin/common.sh -this_script=`basename "$0"` +this_script=$(basename "$0") log_debug "Script [$this_script] has started [$(date)]" log_info "Removing OpenSearch Dashboards [$(date)]" -helm delete -n $LOG_NS v4m-osd +helm delete -n "$LOG_NS" v4m-osd -kubectl -n $LOG_NS delete secret v4m-osd-tls-enabled --ignore-not-found +kubectl -n "$LOG_NS" delete secret v4m-osd-tls-enabled --ignore-not-found log_debug "Script [$this_script] has completed [$(date)]" echo "" - diff --git a/logging/bin/secrets-include.sh b/logging/bin/secrets-include.sh index bb9f68b1..3c2f77bd 100644 --- a/logging/bin/secrets-include.sh +++ b/logging/bin/secrets-include.sh @@ -8,101 +8,105 @@ function create_secret_from_file { - file=$1 - secret_name=$2 - label=$3 - - if [ -z "$(kubectl -n $LOG_NS get secret $secret_name -o name 2>/dev/null)" ]; then - - log_debug "Creating secret [$secret_name]" - - if [ -f "$USER_DIR/logging/$file" ]; then filepath=$USER_DIR/logging - elif [ -f "logging/opensearch/$file" ]; then filepath=logging/opensearch - else - log_error "Could not create secret [$secret_name] because file [$file] could not be found" - return 9 - fi - - if [ "$(kubectl -n $LOG_NS create secret generic $secret_name --from-file=$filepath/$file)" == "secret/$secret_name created" ]; then - log_verbose "Created secret for OpenSearch config file [$file]" - - if [ "$label" != "" ]; then - log_debug "Applying label [$label] to newly created secret [$secret_name]" - kubectl -n $LOG_NS label secret $secret_name $label - fi - - return 0 + file=$1 + secret_name=$2 + label=$3 + + if [ -z "$(kubectl -n "$LOG_NS" get secret "$secret_name" -o name 2> /dev/null)" ]; then + + log_debug "Creating secret [$secret_name]" + + if [ -f "$USER_DIR/logging/$file" ]; then + filepath=$USER_DIR/logging + elif [ -f "logging/opensearch/$file" ]; then + filepath=logging/opensearch + else + log_error "Could not create secret [$secret_name] because file [$file] could not be found" + return 9 + fi + + if 
[ "$(kubectl -n "$LOG_NS" create secret generic "$secret_name" --from-file="$filepath"/"$file")" == "secret/$secret_name created" ]; then + log_verbose "Created secret for OpenSearch config file [$file]" + + if [ -n "$label" ]; then + log_debug "Applying label [$label] to newly created secret [$secret_name]" + # shellcheck disable=SC2086 + # quoting $label results in invalid kubectl syntax + kubectl -n "$LOG_NS" label secret "$secret_name" $label + fi + + return 0 + else + log_error "Could not create secret for OpenSearch config file [$file]" + return 8 + fi else - log_error "Could not create secret for OpenSearch config file [$file]" - return 8 + log_verbose "Using existing secret [$secret_name]" + return 0 fi - else - log_verbose "Using existing secret [$secret_name]" - return 0 - fi } function create_user_secret { - secret_name=$1 - username=$2 - password=$3 - label=$4 - - if [ -z "$(kubectl -n $LOG_NS get secret $secret_name -o name 2>/dev/null)" ]; then - - # log_debug "Will attempt to create secret [$secret_name]" - - if [ "$password" == "" ]; then - # generate password if one not provided - log_debug "Generating random password for [$username]" - password="$(randomPassword)" - label="$label autogenerated_password=true" + secret_name=$1 + username=$2 + password=$3 + label=$4 + + if [ -z "$(kubectl -n "$LOG_NS" get secret "$secret_name" -o name 2> /dev/null)" ]; then + + # log_debug "Will attempt to create secret [$secret_name]" + + if [ -z "$password" ]; then + # generate password if one not provided + log_debug "Generating random password for [$username]" + password="$(randomPassword)" + label="$label autogenerated_password=true" + fi + + if [ "$(kubectl -n "$LOG_NS" create secret generic "$secret_name" --from-literal=username="$username" --from-literal=password="$password")" == "secret/$secret_name created" ]; then + + log_verbose "Created secret for OpenSearch user credentials [$username]" + + if [ -n "$label" ]; then + log_debug "Applying label [$label] 
to newly created secret [$secret_name]" + # shellcheck disable=SC2086 + # quoting $label results in invalid kubectl syntax + kubectl -n "$LOG_NS" label secret "$secret_name" $label + fi + + return 0 + else + log_error "Could not create secret for OpenSearch user credentials [$username]" + return 111 + fi + else + log_verbose "Using existing secret [$secret_name]" + return 0 fi +} - if [ "$(kubectl -n $LOG_NS create secret generic $secret_name --from-literal=username=$username --from-literal=password=$password)" == "secret/$secret_name created" ]; then +function get_credentials_from_secret { - log_verbose "Created secret for OpenSearch user credentials [$username]" + user=$1 - if [ "$label" != "" ]; then - log_debug "Applying label [$label] to newly created secret [$secret_name]" - kubectl -n $LOG_NS label secret $secret_name $label - fi + user_upper=$(echo "$user" | tr '[:lower:]' '[:upper:]') + secret_name="internal-user-$user" + user_var="ES_${user_upper}_USER" + passwd_var="ES_${user_upper}_PASSWD" - return 0 + if [ -z "$(kubectl -n "$LOG_NS" get secret "$secret_name" -o name 2> /dev/null)" ]; then + log_error "The expected secret [$secret_name], containing the required credentials for the [$user] identity, was not found in namespace [$LOG_NS]" + return 1 else - log_error "Could not create secret for OpenSearch user credentials [$username]" - return 111 + export "$user_var"="$(kubectl -n "$LOG_NS" get secret "$secret_name" -o=jsonpath="{.data.username}" 2> /dev/null | base64 --decode)" + export "$passwd_var"="$(kubectl -n "$LOG_NS" get secret "$secret_name" -o=jsonpath="{.data.password}" 2> /dev/null | base64 --decode)" + + if [ "${!user_var}" == "" ] || [ "${!passwd_var}" == "" ]; then + log_error "Required credentials for the [$user] user not found in the [$secret_name] secret." 
+ return 1 + else + log_debug "Required credentials loaded from secret [$secret_name]" + return 0 + fi fi - else - log_verbose "Using existing secret [$secret_name]" - return 0 - fi } - -function get_credentials_from_secret { - - user=$1 - - user_upper=$(echo $user|tr '[a-z]' '[A-Z]') - secret_name="internal-user-$user" - user_var="ES_${user_upper}_USER" - passwd_var="ES_${user_upper}_PASSWD" - - - if [ -z "$(kubectl -n $LOG_NS get secret $secret_name -o name 2>/dev/null)" ]; then - log_error "The expected secret [$secret_name], containing the required credentials for the [$user] identity, was not found in namespace [$LOG_NS]" - return 1 - else - export $user_var=$(kubectl -n $LOG_NS get secret $secret_name -o=jsonpath="{.data.username}" 2>/dev/null |base64 --decode) - export $passwd_var=$(kubectl -n $LOG_NS get secret $secret_name -o=jsonpath="{.data.password}" 2>/dev/null |base64 --decode) - - if [ "${!user_var}" == "" ] || [ "${!passwd_var}" == "" ] ; then - log_error "Required credentials for the [$user] user not found in the [$secret_name] secret." - return 1 - else - log_debug "Required credentials loaded from secret [$secret_name]" - return 0 - fi - fi -} -