diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0f55cc4a..fc8df417 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,12 @@
 ## Unreleased
 * **Logging**
+  * [ANNOUNCEMENT] The [OpenDistro for Elasticsearch (ODFE) project](https://opendistro.github.io/for-elasticsearch/) reached end-of-life in May of 2022 and our project moved to
+    OpenSearch shortly thereafter. This release removes all remaining support for ODFE, including support for migration from ODFE
+    and the ability to use utility scripts (e.g. change_internal_password.sh) with earlier ODFE-backed deployments.
+  * [REMOVAL] Remove support for migrating from an earlier deployment which included ODFE.
+  * [REMOVAL] Remove support for the `LOG_SEARCH_BACKEND` environment variable. Scripts will terminate with an ERROR message if
+    this environment variable is detected.
   * [CHANGE] The Fluent Bit Deployment used for Kubernetes Event collection can now be integrated into the SAS Viya Workload node placement strategy.
   * [CHANGE] An obsolete configuration file related to the Event Router has been removed from the repo.
diff --git a/logging/bin/apiaccess-include.sh b/logging/bin/apiaccess-include.sh
index 3aa313dd..31b6645a 100644
--- a/logging/bin/apiaccess-include.sh
+++ b/logging/bin/apiaccess-include.sh
@@ -168,13 +168,6 @@ function get_kb_api_url {
 # Global vars: kb_api_url - URL to access KB API/service
 #              kbpfpid    - process id of KB portforwarding
 
-   #NOTE: Use of args implemented to support migration of
-   #      ODFE 1.7 Kibana content to OpenSearch Dashoards ONLY!
-   LOG_SEARCH_BACKEND=${1:-$LOG_SEARCH_BACKEND}
-   KB_SERVICENAME=${2:-$KB_SERVICENAME}
-   KB_SERVICEPORT=${3:-$KB_SERVICEPORT}
-   KB_INGRESSNAME=${4:-$KB_INGRESSNAME}
-   KB_TLS_ENABLED=${5}
 
    if [ -n "$kb_api_url" ]; then
      log_debug "Kibana API Endpoint already set [$kb_api_url]"
@@ -183,14 +176,7 @@
 
    pfPID=""
 
-   if [ -n "$KB_TLS_ENABLED" ]; then
-      tlsrequired="$KB_TLS_ENABLED"
-      log_debug "Kibana TLS setting [$KB_TLS_ENABLED] explicitly passed to get_kb_api_url"
-   elif [ "$LOG_SEARCH_BACKEND" != "OPENSEARCH" ]; then
-      tlsrequired="$(kubectl -n $LOG_NS get pod -l role=kibana -o=jsonpath='{.items[*].metadata.annotations.tls_required}')"
-   else
-      tlsrequired="$(kubectl -n $LOG_NS get secret v4m-osd-tls-enabled -o=jsonpath={.data.enable_tls} |base64 --decode)"
-   fi
+   tlsrequired="$(kubectl -n $LOG_NS get secret v4m-osd-tls-enabled -o=jsonpath={.data.enable_tls} |base64 --decode)"
 
    log_debug "TLS required to connect to Kibana? [$tlsrequired]"
   get_api_url "$KB_SERVICENAME" '{.spec.ports[?(@.name=="'${KB_SERVICEPORT}'")].port}' $tlsrequired $KB_INGRESSNAME
diff --git a/logging/bin/change_internal_password.sh b/logging/bin/change_internal_password.sh
index 185a6760..a868d8a4 100755
--- a/logging/bin/change_internal_password.sh
+++ b/logging/bin/change_internal_password.sh
@@ -27,15 +27,10 @@ function show_usage {
    echo ""
 }
 
-if [ "$LOG_SEARCH_BACKEND" == "OPENSEARCH" ]; then
-   targetpod="v4m-search-0"
-   targetcontainer="opensearch"
-   toolsrootdir="/usr/share/opensearch/plugins/opensearch-security"
-else
-   targetpod="v4m-es-master-0"
-   targetcontainer="elasticsearch"
-   toolsrootdir="/usr/share/elasticsearch/plugins/opendistro_security"
-fi
+# set vars used in curl commands
+targetpod="v4m-search-0"
+targetcontainer="opensearch"
+toolsrootdir="/usr/share/opensearch/plugins/opensearch-security"
 
 USER_NAME=${1}
 NEW_PASSWD="${2}"
@@ -258,17 +253,10 @@ if [ "$success" == "true" ]; then
         log_notice " *********** IMPORTANT NOTE *********** "
         log_notice " "
         log_notice " After changing the password for the [kibanaserver] user, you need to restart the "
-        if [ "$LOG_SEARCH_BACKEND" == "OPENSEARCH" ]; then
-           log_notice " OpenSearch Dashboards pod to ensure OpenSearch Dashboards can still be accessed and used. "
-           log_notice " "
-           log_notice " This can be done by submitting the following command: "
-           log_notice " kubectl -n $LOG_NS delete pods -l 'app=opensearch-dashboards'"
-        else
-           log_notice " Kibana pod to ensure Kibana can still be accessed and used."
-           log_notice ""
-           log_notice " This can be done by submitting the following command:"
-           log_notice " kubectl -n $LOG_NS delete pods -l 'app=v4m-es,role=kibana'"
-        fi
+        log_notice " OpenSearch Dashboards pod to ensure OpenSearch Dashboards can still be accessed and used. "
+        log_notice " "
+        log_notice " This can be done by submitting the following command: "
+        log_notice " kubectl -n $LOG_NS delete pods -l 'app=opensearch-dashboards'"
         echo ""
         ;;
      metricgetter)
diff --git a/logging/bin/common.sh b/logging/bin/common.sh
index e788519c..3fa80e93 100755
--- a/logging/bin/common.sh
+++ b/logging/bin/common.sh
@@ -15,16 +15,14 @@ if [ "$SAS_LOGGING_COMMON_SOURCED" = "" ]; then
       fi
    fi
 
-
-  function require_opensearch {
-    if [ "$LOG_SEARCH_BACKEND" != "OPENSEARCH" ]; then
-        log_error "This script is only appropriate for use with OpenSearch as the search back-end."
-        log_error "The LOG_SEARCH_BACKEND environment variable is currently set to [$LOG_SEARCH_BACKEND]"
-        exit 1
-    fi
-  }
-  export -f require_opensearch
-
+  #Check for obsolete env var
+  if [ -n "$LOG_SEARCH_BACKEND" ]; then
+     log_error "Support for the LOG_SEARCH_BACKEND environment variable has been removed."
+     log_error "This script is only appropriate for use with OpenSearch as the search back-end."
+ log_error "The LOG_SEARCH_BACKEND environment variable is currently set to [$LOG_SEARCH_BACKEND]" + exit 1 + fi + export LOG_NS="${LOG_NS:-logging}" #if TLS (w/in cluster; for all monitoring components) is requested, require TLS into OSD pod, too @@ -35,33 +33,16 @@ if [ "$SAS_LOGGING_COMMON_SOURCED" = "" ]; then # TLS is required for logging components so hard-code to 'true' export TLS_ENABLE="true" - # OpenSearch or OpenDistro for Elasticsearch - export LOG_SEARCH_BACKEND="${LOG_SEARCH_BACKEND:-OPENSEARCH}" - log_debug "Search Backend set to [$LOG_SEARCH_BACKEND]" - - if [ "$LOG_SEARCH_BACKEND" == "OPENSEARCH" ]; then - export ES_SERVICENAME="v4m-search" - export ES_INGRESSNAME="v4m-search" - - export KB_SERVICENAME="v4m-osd" - export KB_INGRESSNAME="v4m-osd" - export KB_SERVICEPORT="http" + # set some OpenSearch/OSD env vars + export ES_SERVICENAME="v4m-search" + export ES_INGRESSNAME="v4m-search" - export ES_PLUGINS_DIR="_plugins" - export LOG_XSRF_HEADER="osd-xsrf:true" - else - export ES_SERVICENAME="v4m-es-client-service" - export ES_INGRESSNAME="v4m-es-client-service" - - export KB_SERVICENAME="v4m-es-kibana-svc" - export KB_INGRESSNAME="v4m-es-kibana-ing" - export KB_SERVICEPORT="kibana-svc" - - - export ES_PLUGINS_DIR="_opendistro" - export LOG_XSRF_HEADER="kbn-xsrf: true" - fi + export KB_SERVICENAME="v4m-osd" + export KB_INGRESSNAME="v4m-osd" + export KB_SERVICEPORT="http" + export ES_PLUGINS_DIR="_plugins" + export LOG_XSRF_HEADER="osd-xsrf:true" export V4M_NS=$LOG_NS @@ -73,8 +54,6 @@ if [ "$SAS_LOGGING_COMMON_SOURCED" = "" ]; then export SAS_LOGGING_COMMON_SOURCED=true - #Environment vars related to upgrading ODFE 1.7.0 to ODFE 1.13.x - export KB_GLOBAL_EXPORT_FILE=${KB_GLOBAL_EXPORT_FILE:-"$TMP_DIR/kibana_global_content.ndjson"} fi echo "" diff --git a/logging/bin/deploy_esexporter.sh b/logging/bin/deploy_esexporter.sh index 40da58bf..b0a03c68 100755 --- a/logging/bin/deploy_esexporter.sh +++ b/logging/bin/deploy_esexporter.sh @@ -20,9 +20,6 @@ fi set -e -#Fail if not using OpenSearch back-end -require_opensearch - log_info "Deploying Elasticsearch metric exporter ..." # check for pre-reqs diff --git a/logging/bin/deploy_fluentbit_k8sevents_opensearch.sh b/logging/bin/deploy_fluentbit_k8sevents_opensearch.sh index 881d8e4f..daae9a6e 100755 --- a/logging/bin/deploy_fluentbit_k8sevents_opensearch.sh +++ b/logging/bin/deploy_fluentbit_k8sevents_opensearch.sh @@ -33,9 +33,6 @@ else wnpValuesFile="$TMP_DIR/empty.yaml" fi -#Fail if not using OpenSearch back-end -require_opensearch - log_info "Deploying Fluent Bit for collecting Kubernetes Events..." #TO DO: Check that OpenSearch is actually deployed and running? diff --git a/logging/bin/deploy_fluentbit_opensearch.sh b/logging/bin/deploy_fluentbit_opensearch.sh index 24eeee06..5071d7a5 100755 --- a/logging/bin/deploy_fluentbit_opensearch.sh +++ b/logging/bin/deploy_fluentbit_opensearch.sh @@ -24,9 +24,6 @@ fi set -e -#Fail if not using OpenSearch back-end -require_opensearch - log_info "Deploying Fluent Bit ..." # check for pre-reqs diff --git a/logging/bin/deploy_logging.sh b/logging/bin/deploy_logging.sh index f7702211..3091140e 100755 --- a/logging/bin/deploy_logging.sh +++ b/logging/bin/deploy_logging.sh @@ -7,9 +7,6 @@ cd "$(dirname $BASH_SOURCE)/../.." 
 source logging/bin/common.sh
 source bin/autogenerate-include.sh
 
-#Fail if not using OpenSearch back-end
-require_opensearch
-
 # Confirm NOT on OpenShift
 if [ "$OPENSHIFT_CLUSTER" == "true" ]; then
    if [ "${CHECK_OPENSHIFT_CLUSTER:-true}" == "true" ]; then
diff --git a/logging/bin/deploy_logging_openshift.sh b/logging/bin/deploy_logging_openshift.sh
index 38bb17bd..a113581a 100755
--- a/logging/bin/deploy_logging_openshift.sh
+++ b/logging/bin/deploy_logging_openshift.sh
@@ -6,12 +6,6 @@ cd "$(dirname $BASH_SOURCE)/../.."
 
 source logging/bin/common.sh
 
-##################################
-# Confirm using OpenSearch       #
-##################################
-require_opensearch
-
-
 ##################################
 # Confirm on OpenShift           #
 ##################################
diff --git a/logging/bin/deploy_opensearch.sh b/logging/bin/deploy_opensearch.sh
index bcae9dcf..8b212d67 100755
--- a/logging/bin/deploy_opensearch.sh
+++ b/logging/bin/deploy_opensearch.sh
@@ -1,6 +1,6 @@
 #! /bin/bash
 
-# Copyright © 2022, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
+# Copyright © 2022-2025, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
 # SPDX-License-Identifier: Apache-2.0
 
 cd "$(dirname $BASH_SOURCE)/../.."
@@ -110,9 +110,6 @@ set -e
 
 # check for pre-reqs
 #
-#Fail if not using OpenSearch back-end
-require_opensearch
-
 checkDefaultStorageClass
 
 # Confirm namespace exists
@@ -124,9 +121,9 @@ fi
 #Generate yaml files with all container-related keys
 generateImageKeysFile "$OS_FULL_IMAGE" "logging/opensearch/os_container_image.template"
 generateImageKeysFile "$OS_SYSCTL_FULL_IMAGE" "$imageKeysFile" "OS_SYSCTL_"
+
 #Copy imageKeysFile since next call will replace existing one
 cp "$imageKeysFile" "$TMP_DIR/opensearch_imagekeysfile.yaml"
-
 generateImageKeysFile "$OS_FULL_IMAGE" "logging/opensearch/os_initcontainer_image.template" "" "true"
 
 # get credentials
@@ -223,10 +220,6 @@ fi
 helmRepoAdd opensearch https://opensearch-project.github.io/helm-charts
 
-## Commenting out as it might be redundant code
-# log_verbose "Updating Helm repositories"
-# helm repo update
-
 # Check for existing OpenSearch helm release
 if [ "$(helm -n $LOG_NS list --filter 'opensearch' -q)" == "opensearch" ]; then
    log_debug "The Helm release [opensearch] already exists; upgrading the release."
@@ -241,94 +234,11 @@ helm2ReleaseCheck odfe-$LOG_NS
 
 # Check for existing Open Distro helm release
 if [ "$(helm -n $LOG_NS list --filter 'odfe' -q)" == "odfe" ]; then
-   log_info "An existing ODFE-based deployment was detected; migrating to an OpenSearch-based deployment."
-   existingODFE="true"
-
-   #
-   #Migrate Kibana content if upgrading from ODFE 1.7.0
-   #
-   if [ "$(helm -n $LOG_NS list -o yaml --filter odfe |grep app_version)" == "- app_version: 1.8.0" ]; then
-
-      # Prior to our 1.1.0 release we used ODFE 1.7.0
-      log_info "Migrating content from Open Distro for Elasticsearch 1.7.0"
-
-      #export exisiting content from global tenant
-      #KB_GLOBAL_EXPORT_FILE="$TMP_DIR/kibana_global_content.ndjson"
-
-      log_debug "Exporting exisiting content from global tenant to temporary file [$KB_GLOBAL_EXPORT_FILE]."
-
-      set +e
-
-      #Need to connect to existing ODFE instance:
-      # *unset vars returned by call to get_kb_api_url to force regeneration
-      # *pass ODFE-specific values to get_kb_api_url
-      unset kb_api_url
-      unset kbpfpid
-      get_kb_api_url "ODFE" "v4m-es-kibana-svc" "kibana-svc" "v4m-es-kibana-ing" "false"
-
-      #Need to confirm KB URL works...might not if TLS enabled.
-      #If that's the case, reset things and do it again with TLS=true.
-
-      response=$(curl -s -o /dev/null -w "%{http_code}" -XGET "${kb_api_url}/status" -u $ES_ADMIN_USER:$ES_ADMIN_PASSWD -k)
-
-      if [[ $response != 2* ]]; then
-         log_debug "Unable to connect to Kibana using HTTP; will try using HTTPS"
-         stop_kb_portforwarding
-         unset kb_api_url
-         unset kbpfpid
-         get_kb_api_url "ODFE" "v4m-es-kibana-svc" "kibana-svc" "v4m-es-kibana-ing" "true"
-      else
-         log_debug "Confirmed connection to Kibana"
-      fi
-
-      content2export='{"type": ["config", "url","visualization", "dashboard", "search", "index-pattern"],"excludeExportDetails": false}'
-
-      #The 'kb-xsrf' reference below is correct since we are interacting with ODFE KB
-      response=$(curl -s -o $KB_GLOBAL_EXPORT_FILE -w "%{http_code}" -XPOST "${kb_api_url}/api/saved_objects/_export" -d "$content2export" -H "kbn-xsrf: true" -H 'Content-Type: application/json' -u $ES_ADMIN_USER:$ES_ADMIN_PASSWD -k)
-
-      if [[ $response != 2* ]]; then
-         log_warn "There was an issue exporting the existing content from Kibana [$response]"
-         log_debug "Failed response details: $(tail -n1 $KB_GLOBAL_EXPORT_FILE)"
-         #TODO: Exit here? Display messages as shown? Add BIG MESSAGE about potential loss of content?
-      else
-         log_info "Content from existing Kibana instance cached for migration. [$response]"
-         log_debug "Export details: $(tail -n1 $KB_GLOBAL_EXPORT_FILE)"
-      fi
-
-      #Remove traces of ODFE interaction
-      stop_kb_portforwarding
-      unset kb_api_url
-      unset kbpfpid
-   fi
-
-   #
-   # Upgrade from ODFE to OpenSearch
-   #
-
-   # Remove Fluent Bit Helm release to
-   # avoid losing log messages during transition
-   if helm3ReleaseExists v4m-fb $LOG_NS; then
-      log_debug "Removing the Fluent Bit Helm release"
-      helm -n $LOG_NS delete v4m-fb
-   fi
-
-   # Remove the existing ODFE Helm release
-   log_debug "Removing an existing ODFE Helm release"
-   helm -n $LOG_NS delete odfe
-   sleep 20
-
-   #bypass security setup since
-   #it was already configured
-   existingSearch=true
+   log_error "An existing ODFE-based deployment was detected. It must be removed before deploying the current version."
+   exit 1
 
-   #Migrate PVCs
-   source logging/bin/migrate_odfe_pvcs-include.sh
-else
-   log_debug "No obsolete Helm release of [odfe] was found."
-   existingODFE="false"
 fi
 
-
 # OpenSearch user customizations
 ES_OPEN_USER_YAML="${ES_OPEN_USER_YAML:-$USER_DIR/logging/user-values-opensearch.yaml}"
 if [ ! -f "$ES_OPEN_USER_YAML" ]; then
@@ -427,31 +337,6 @@ helm $helmDebug upgrade --install opensearch \
    $versionstring \
    $chart2install
 
-# ODFE => OpenSearch Migration
-if [ "$deploy_temp_masters" == "true" ]; then
-
-   #NOTE: rbac.create set to 'false' since ServiceAccount
-   #      was created during prior Helm chart deployment
-   log_debug "Upgrade from ODFE to OpenSearch detected; creating temporary master-only nodes."
-   helm $helmDebug upgrade --install opensearch-master \
-      --namespace $LOG_NS \
-      --values logging/opensearch/opensearch_helm_values.yaml \
-      --values "$imageKeysFile" \
-      --values "$wnpValuesFile" \
-      --values "$ES_OPEN_USER_YAML" \
-      --values "$OPENSHIFT_SPECIFIC_YAML" \
-      --set nodeGroup=temp_masters \
-      --set ingress.enabled=false \
-      --set replicas=2 \
-      --set roles={master} \
-      --set rbac.create=false \
-      --set masterService=v4m-search \
-      --set fullnameOverride=v4m-master \
-      $versionstring \
-      $chart2install
-fi
-
 
 # waiting for PVCs to be bound
 declare -i pvcCounter=0
 pvc_status=$(kubectl -n $LOG_NS get pvc v4m-search-v4m-search-0 -o=jsonpath="{.status.phase}")
@@ -483,61 +368,11 @@ kubectl -n $LOG_NS wait pods v4m-search-0 --for=condition=Ready --timeout=10m
 
 log_verbose "Waiting [2] minutes to allow OpenSearch to initialize [$(date)]"
 sleep 120
 
-# ODFE => OpenSearch Migration
-if [ "$deploy_temp_masters" == "true" ]; then
-
-   log_verbose "Upgrade to OpenSearch from Open Distro for Elasticsearch processing continues..."
-   log_info "Waiting up to [3] minutes for 'master-only' ES pods to be Ready [$(date)]"
-   kubectl -n $LOG_NS wait pods -l app.kubernetes.io/instance=opensearch-master --for=condition=Ready --timeout=3m
-
-   #TODO: Remove 'master-only' nodes from list of 'master-eligible' ES nodes via API call?
-   # get_es_api_url
-   # curl call to remove 'master-only' nodes from list of 'master-eligible' nodes
-   # curl -X POST $es_api_url/_cluster/voting_config_exclusions?node_names=v4m-master-0,v4m-master-1,v4m-master-2
-   # sleep 60
-   # Probably (?) can skip the scale down and just uninstall Helm release
-
-
-   # Scale down master statefulset by 1 (to 1)
-   log_debug "Removing 'master-only' ES nodes needed only during upgrade to OpenSearch"
-
-   kubectl -n $LOG_NS scale statefulset v4m-master --replicas 1
-   ## wait for 1 minute (probably excessive, but...)
-   sleep 30
-
-   #Scale down master statefulset by 1 (to 0)
-   kubectl -n $LOG_NS scale statefulset v4m-master --replicas 0
-   ##wait for 1 minute (probably excessive, but...)
-   sleep 30
-
-   #uninstall the Helm release
-   helm -n $LOG_NS delete opensearch-master
-   ##wait for 30 secs? 1 min?
-   sleep 30
-
-   #Delete "master" PVCs
-   ## Add labels? Appears labels were overwritten by Helm chart
-   kubectl -n $LOG_NS delete pvc v4m-master-v4m-master-0 v4m-master-v4m-master-1 v4m-master-v4m-master-2 --ignore-not-found
-fi
-
-# Reconcile count of 'data' nodes
-if [ "$existingODFE" == "true" ]; then
-
-   min_data_nodes=$((odfe_data_pvc_count - 1))
-   search_node_count=$(kubectl -n $LOG_NS get statefulset v4m-search -o jsonpath='{.spec.replicas}' 2>/dev/null)
-
-   if [ "$search_node_count" -gt "0" ] && [ "$min_data_nodes" -gt "0" ] && [ "$search_node_count" -lt "$min_data_nodes" ]; then
-      log_warn "There were insufficient OpenSearch nodes [$search_node_count] configured to handle all of the data from the original ODFE 'data' nodes"
-      log_warn "This OpenSearch cluster has been scaled up to [$min_data_nodes] nodes to ensure no loss of data."
-      kubectl -n $LOG_NS scale statefulset v4m-search --replicas=$min_data_nodes
-   fi
-fi
-
 set +e
 
 # Run the security admin script on the pod
 # Add some logic to find ES release
-if [ "$existingSearch" == "false" ] && [ "$existingODFE" != "true" ]; then
+if [ "$existingSearch" == "false" ] ; then
    kubectl -n $LOG_NS exec v4m-search-0 -c opensearch -- config/run_securityadmin.sh
    # Retrieve log file from security admin script
    kubectl -n $LOG_NS cp v4m-search-0:config/run_securityadmin.log $TMP_DIR/run_securityadmin.log -c opensearch
diff --git a/logging/bin/deploy_opensearch_content.sh b/logging/bin/deploy_opensearch_content.sh
index 2748f8ba..d0976b30 100755
--- a/logging/bin/deploy_opensearch_content.sh
+++ b/logging/bin/deploy_opensearch_content.sh
@@ -28,9 +28,6 @@
 tmpfile=$TMP_DIR/output.txt
 
 set -e
 # check for pre-reqs
 
-#Fail if not using OpenSearch back-end
-require_opensearch
-
 # Confirm namespace exists
 if [ "$(kubectl get ns $LOG_NS -o name 2>/dev/null)" == "" ]; then
diff --git a/logging/bin/deploy_openshift_prereqs.sh b/logging/bin/deploy_openshift_prereqs.sh
index 35cbf285..6e338781 100755
--- a/logging/bin/deploy_openshift_prereqs.sh
+++ b/logging/bin/deploy_openshift_prereqs.sh
@@ -8,9 +8,6 @@
 source logging/bin/common.sh
 
 this_script=`basename "$0"`
 
-#Fail if not using OpenSearch back-end
-require_opensearch
-
 log_debug "Script [$this_script] has started [$(date)]"
 
 # Deploy OpenShift-specific pre-reqs?
diff --git a/logging/bin/deploy_osd.sh b/logging/bin/deploy_osd.sh
index 970c262b..941c1111 100755
--- a/logging/bin/deploy_osd.sh
+++ b/logging/bin/deploy_osd.sh
@@ -27,9 +27,6 @@
 set -e
 # check for pre-reqs
 #
 
-#Fail if not using OpenSearch back-end
-require_opensearch
-
 #Generate yaml file with all container-related keys
 generateImageKeysFile "$OSD_FULL_IMAGE" "logging/opensearch/osd_container_image.template"
diff --git a/logging/bin/deploy_osd_content.sh b/logging/bin/deploy_osd_content.sh
index 164e1e09..b16aee0b 100755
--- a/logging/bin/deploy_osd_content.sh
+++ b/logging/bin/deploy_osd_content.sh
@@ -21,9 +21,6 @@
 if [ "$KIBANA_CONTENT_DEPLOY" != "true" ]; then
    exit 0
 fi
 
-#Fail if not using OpenSearch back-end
-require_opensearch
-
 # temp file used to capture command output
 tmpfile=$TMP_DIR/output.txt
 
@@ -101,41 +98,6 @@
 else
    log_debug "The OpenSearch Dashboards tenant space [cluster_admins] exists."
 fi
 
-#Migrating from ODFE 1.7.0 (file should only exist during migration)
-if [ -f "$KB_GLOBAL_EXPORT_FILE" ]; then
-
-   # delete 'demo' Kibana tenant space created (but not used) prior to V4m version 1.1.0
-   if kibana_tenant_exists "admin_tenant"; then
-
-      delete_kibana_tenant "admin_tenant"
-
-      rc=$?
-      if [ "$rc" == "0" ]; then
-         log_debug "The tenant space [admin_tenant] was deleted."
-      else
-         log_debug "Problems were encountered while attempting to delete tenant space [admin_tenant]."
-      fi
-   fi
-
-   log_verbose "Will attempt to migrate content from previous deployment."
-
-   kb_migrate_response="$TMP_DIR/kb_migrate_response.json"
-
-   #import previously exported content from global tenant
-   response=$(curl -s -o $kb_migrate_response -w "%{http_code}" -XPOST "${kb_api_url}/api/saved_objects/_import?overwrite=false" -H "$LOG_XSRF_HEADER" -H 'securitytenant: cluster_admins' --form file="@$KB_GLOBAL_EXPORT_FILE" -u $ES_ADMIN_USER:$ES_ADMIN_PASSWD -k)
-
-   if [[ $response != 2* ]]; then
-      log_warn "There was an issue importing the cached existing content into the OpenSearch Dashboards tenant space [cluster_admins]. [$response]"
-      log_warn "Some of your existing content may need to be recreated or restored from your backup files."
-      log_debug "Failed response details: $(tail -n1 $kb_migrate_response)"
-   else
-      log_info "Existing content imported to [cluster_admins] OpenSearh Dashboards tenant space. [$response]"
-      log_debug "Import details: $(tail -n1 $kb_migrate_response)"
-   fi
-else
-   log_debug "Migration from ODFE 1.7.0 *NOT* detected"
-fi
-
 # Import OSD Searches, Visualizations and Dashboard Objects using curl
 ./logging/bin/import_osd_content.sh logging/osd/common cluster_admins
 ./logging/bin/import_osd_content.sh logging/osd/cluster_admins cluster_admins
diff --git a/logging/bin/migrate_odfe_pvcs-include.sh b/logging/bin/migrate_odfe_pvcs-include.sh
deleted file mode 100644
index 31d7ef2e..00000000
--- a/logging/bin/migrate_odfe_pvcs-include.sh
+++ /dev/null
@@ -1,143 +0,0 @@
-# Copyright © 2022, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
-# SPDX-License-Identifier: Apache-2.0
-
-# This script is NOT intended to be run directly
-# It is sourced (as needed) during the deployment of OpenSearch
-
-function get_odfe_pvcs {
-   local namespace role
-   namespace=$1
-   role=$2
-
-   kubectl -n $namespace get pvc -l role=$role -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.volumeName}{"\t"}{.spec.resources.requests.storage}{"\t"}{.spec.storageClassName}{"\n"}{end}'
-}
-
-function patch_odfe_pvc {
-   local somepvc newPVCName pvc pv size storageClass
-   somepvc=$1
-   newPVCName=$2
-
-   pvcName=$(echo "$somepvc" | awk '{ print $1}')
-   pvName=$(echo "$somepvc" | awk '{ print $2}')
-   pvcSize=$(echo "$somepvc" | awk '{ print $3}')
-   storageClass=$(echo "$somepvc" | awk '{ print $4}')
-
-   log_debug "PVC: $pvcName PV:$pvName SIZE:$pvcSize StorageClass: $storageClass New PVC Name:$newPVCName"
-
-   log_debug "Patching reclaimPolicy on PV [pvName:$pvName]";
-   kubectl patch pv $pvName -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}';
-
-   log_debug "Creating new PVC [$newPVCName]"
-   printf "apiVersion: v1 \nkind: PersistentVolumeClaim \nmetadata:\n name: $newPVCName\nspec:\n accessModes:\n - ReadWriteOnce\n storageClassName: $storageClass\n resources:\n requests:\n storage: $pvcSize\n volumeName: $pvName" |kubectl -n $LOG_NS apply -f -
-
-   # delete ODFE pvc
-   log_debug "Deleting existing ODFE PVC [$pvcName]"
-   kubectl -n $LOG_NS delete pvc $pvcName
-
-   # remove link w/ODFE PVC from PV
-   log_debug "Removing obsolete link between PVC [$pvcName] and PV [$pvName]"
-   kubectl patch pv $pvName --type json -p '[{"op": "remove", "path": "/spec/claimRef"}]'
-
-   # link PV to new PVC
-   log_debug "Explicitly linking PV [$pvName] to new PVC [$newPVCName]"
-   kubectl patch pv $pvName -p '{"spec": {"claimRef": {"kind": "PersistentVolumeClaim", "namespace": "'$LOG_NS'", "name": "'$newPVCName'"}}}'
-
-   # Reset policy so PV is deleted when PVC is deleted
-   log_debug "Resetting reclaimPolicy on PV [pvName:$pvName]";
-   kubectl patch pv $pvName -p '{"spec":{"persistentVolumeReclaimPolicy":"Delete"}}';
-}
-
-
-# get list of existing ODFE 'master' pvcs
-IFS=$'\n' odfe_master_pvcs=($(get_odfe_pvcs $LOG_NS master))
-odfe_master_pvc_count=${#odfe_master_pvcs[@]}
-log_debug "Detected [$odfe_master_pvc_count] PVCs associated with role [master]"
-
-# get list of existing ODFE 'data' pvcs
-IFS=$'\n' odfe_data_pvcs=($(get_odfe_pvcs $LOG_NS data))
-odfe_data_pvc_count=${#odfe_data_pvcs[@]}
-log_debug "Detected [$odfe_data_pvc_count] PVCs associated with role [data]"
-
-
-if [ "$LOG_DEBUG_ENABLE" == "true" ]; then
-   log_debug "List of MASTER PVCs found"
-   for (( i=0; i<${#odfe_master_pvcs[@]}; i++ )); do
-      thispvc=(${odfe_master_pvcs[$i]})
-      log_debug "\t $i $thispvc"
-   done
-
-   log_debug "List of DATA PVCs found"
-   for (( i=0; i<${#odfe_data_pvcs[@]}; i++ )); do
-      thispvc=(${odfe_data_pvcs[$i]})
-      log_debug "\t $i $thispvc"
-   done
-fi
-
-
-if [ "$odfe_master_pvc_count" -gt 0 ] && [ "$odfe_data_pvc_count" -eq 0 ]; then
-
-   log_debug "Only ODFE 'master' PVCs detected"
-   log_debug "The 'master' PVCs will be mapped to the primary OpenSearch PVCs"
-
-   master_target="v4m-search-v4m-search-"   # map ODFE 'master' PVCs to primary OpenSearch PVCs
-   deploy_temp_masters="false"              # do NOT deploy temporary 'master' nodes
-
-elif [ "$odfe_master_pvc_count" -eq 1 ] && [ "$odfe_data_pvc_count" -eq 1 ]; then
-   # min-logging sample configuration
-   log_error "The current ODFE configuration (likely based on the min-logging sample) can NOT be migrated to OpenSearch"
-   log_info "You may be able to redeploy using an earlier release of SAS Viya Monitoring for Kubernetes to restore the current ODFE."
-   log_info "However, the underlying Open Distro for Elasticsearch technology is no longer actively maintained and doing so will make you vulnerable."
-   log_info ""
-   log_info "Or, you can re-run the deployment script and deploy an entirely new OpenSearch-based deployment of SAS Viya Monitoring for Kubernetes"
-   log_info "You can delete the existing PVCs that cannot be migrated and free up storage space by submitting a command:"
-   log_info "   kubectl -n $LOG_NS delete pvc -l 'app=v4m-es, release=odfe' "
-   log_info "Note: any previously captured log messages will no longer be available."
-
-   # Remove OpenSearch-specific configMap
-   kubectl -n $LOG_NS delete configmap run-securityadmin.sh --ignore-not-found
-
-   exit 1
-
-elif [ "$odfe_master_pvc_count" -gt 0 ] && [ "$odfe_data_pvc_count" -gt 0 ]; then
-
-   log_debug "A mix of ODFE 'master' and 'data' nodes detected."
-   log_debug "The 'data' PVCs will be mapped to the primary OpenSearch PVCs and the 'master' PVCs to temporary PVCs to allow upgrade"
-
-   data_target="v4m-search-v4m-search-"     # map ODFE 'data' PVCs to primary OpenSearch PVCs
-   master_target="v4m-master-v4m-master-"   # map ODFE 'master' PVCs to temporary OpenSearch temp_master PVCs
-   deploy_temp_masters="true"               # deploy temporary 'master' nodes
-
-elif [ "$odfe_master_pvc_count" -eq 0 ] && [ "$odfe_data_pvc_count" -gt 0 ]; then
-
-   log_debug "Only ODFE 'data' PVCs detected"
-   log_debug "The 'data' PVCs will be mapped to the primary OpenSearch PVCs"
-
-   master_target="v4m-search-v4m-search-"   # map ODFE 'master' PVCs to primary OpenSearch PVCs
-   deploy_temp_masters="false"              # do NOT deploy temporary 'master' nodes
-
-else
-   log_warn "No existing ODFE PVCs detected; nothing to migrate."
-   existingODFE=false
-   existingSearch="false"
-fi
-
-#Handle ODFE Master PVCs
-if [ -n "$master_target" ]; then
-   for (( i=0; i<${#odfe_master_pvcs[@]}; i++ )); do
-      thispvc=(${odfe_master_pvcs[$i]})
-      patch_odfe_pvc "$thispvc" "$master_target$i"
-   done
-else
-   log_debug "Doing nothing with the existing ODFE 'master' PVCs"
-fi
-
-#Handle ODFE Data PVCs
-if [ -n "$data_target" ]; then
-   for (( i=0; i<${#odfe_data_pvcs[@]}; i++ )); do
-      thispvc=(${odfe_data_pvcs[$i]})
-      patch_odfe_pvc "$thispvc" "$data_target$i"
-   done
-else
-   log_debug "Doing nothing with the existing ODFE 'data' PVCs"
-fi
-