diff --git a/addons/elasticsearch/dataprotection/backup.sh b/addons/elasticsearch/dataprotection/backup.sh index da7926f80..0c0dbb1f0 100644 --- a/addons/elasticsearch/dataprotection/backup.sh +++ b/addons/elasticsearch/dataprotection/backup.sh @@ -28,7 +28,7 @@ trap handle_exit EXIT function getToolConfigValue() { local var=$1 - cat $toolConfig | grep "$var[[:space:]]*=" | awk '{print $NF}' + cat $toolConfig | grep "${var}[[:space:]]*=" | awk '{print $NF}' } s3_endpoint=$(getToolConfigValue endpoint) @@ -94,6 +94,18 @@ done echo "INFO: All nodes keystore configured, reloading secure settings" curl -X POST "${ES_ENDPOINT}/_nodes/reload_secure_settings" +# Wait for secure settings to be reloaded and plugins to be loaded +echo "INFO: Waiting for nodes to be ready after reloading secure settings..." +sleep 10 + +# Verify that S3 repository plugin is available (node-info API lists plugins in 6.x/7.x and built-in modules in 8.x) +echo "INFO: Checking if S3 repository plugin is available..." +if curl -s -f "${ES_ENDPOINT}/_nodes/plugins" | grep -q 'repository-s3'; then + echo "INFO: S3 repository plugin is available" +else + echo "WARNING: S3 repository plugin may not be available, attempting to create repository anyway" +fi + # DP_BACKUP_BASE_PATH is the path to the backup directory # if the target policy is All, the path pattern is: /${namespace}/${clusterName}-${clusterUID}/${componentDef}/${backupName}/${podName} # if the target policy is Any, the path pattern is: /${namespace}/${clusterName}-${clusterUID}/${componentDef}/${backupName} @@ -105,7 +117,7 @@ base_path=${base_path#*/} function wait_for_snapshot_completion() { while true; do - state=$(curl -s -X GET "${ES_ENDPOINT}/_snapshot/${REPOSITORY}/${DP_BACKUP_NAME}?sort=name&pretty" | grep -w state | awk '{print $NF}' | tr -d ',"') + state=$(curl -s -X GET "${ES_ENDPOINT}/_snapshot/${REPOSITORY}/${DP_BACKUP_NAME}?pretty" | grep -w state | awk '{print $NF}' | tr -d ',"') if [ "$state" == "SUCCESS" ]; then echo "INFO: backup success" break diff --git a/addons/elasticsearch/dataprotection/restore.sh 
b/addons/elasticsearch/dataprotection/restore.sh index 9ea97f580..0bc74434f 100644 --- a/addons/elasticsearch/dataprotection/restore.sh +++ b/addons/elasticsearch/dataprotection/restore.sh @@ -22,7 +22,7 @@ trap handle_exit EXIT function getToolConfigValue() { local var=$1 - cat $toolConfig | grep "$var[[:space:]]*=" | awk '{print $NF}' + cat $toolConfig | grep "${var}[[:space:]]*=" | awk '{print $NF}' } s3_endpoint=$(getToolConfigValue endpoint) @@ -93,6 +93,25 @@ done echo "INFO: All nodes keystore configured for restore, reloading secure settings" curl -X POST "${ES_ENDPOINT}/_nodes/reload_secure_settings" +# Get Elasticsearch major version (call once at the beginning) +function get_es_major_version() { + local version_info=$(curl -s ${BASIC_AUTH} -X GET "${ES_ENDPOINT}/_nodes" | grep -o '"version":"[^"]*"' | head -1 | cut -d'"' -f4) + if [ -z "$version_info" ]; then + echo "ERROR: Failed to get Elasticsearch version" >&2 + return 1 + fi + # Extract major version (e.g., "7.10.1" -> "7") + echo "$version_info" | cut -d'.' -f1 +} + +echo "INFO: Getting Elasticsearch version" +es_major_version=$(get_es_major_version) +if [ $? -ne 0 ]; then + echo "ERROR: Failed to get Elasticsearch version" + exit 1 +fi +echo "INFO: Detected Elasticsearch major version: $es_major_version" + cat > /tmp/repository.json<< EOF { "type": "s3", @@ -120,14 +139,20 @@ function enable_indexing_and_geoip() { exit 1 ;; esac - curl -f -X PUT "${ES_ENDPOINT}/_cluster/settings?pretty" -H 'Content-Type: application/json' -d' - { - "persistent": { - "ingest.geoip.downloader.enabled": '$switch', - "indices.lifecycle.history_index_enabled": '$switch' - } - } - ' + + # Check if version is less than 8.0 + if [ "$es_major_version" -lt 8 ]; then + echo "WARNING: Elasticsearch version is $es_major_version.x, which does not support dynamically updating 'indices.lifecycle.history_index_enabled' and 'ingest.geoip.downloader.enabled'. 
" + else + curl -f -X PUT "${ES_ENDPOINT}/_cluster/settings?pretty" -H 'Content-Type: application/json' -d' + { + "persistent": { + "ingest.geoip.downloader.enabled": '$switch', + "indices.lifecycle.history_index_enabled": '$switch' + } + } + ' + fi } function switch_ilm() { @@ -258,7 +283,12 @@ fi enable_destructive_requires_name false # Delete all existing data streams on the cluster. -curl -f -s -X DELETE "${ES_ENDPOINT}/_data_stream/*?expand_wildcards=all&pretty" +# expand_wildcards parameter is not supported in ES 7.x for data stream deletion +if [ "$es_major_version" -lt 8 ]; then + curl -f -s -X DELETE "${ES_ENDPOINT}/_data_stream/*?pretty" +else + curl -f -s -X DELETE "${ES_ENDPOINT}/_data_stream/*?expand_wildcards=all&pretty" +fi # Delete all existing indices on the cluster. curl -f -s -X DELETE "${ES_ENDPOINT}/*?expand_wildcards=all&pretty" diff --git a/addons/elasticsearch/plugins/Dockerfile b/addons/elasticsearch/plugins/Dockerfile index f1e986b63..878859541 100644 --- a/addons/elasticsearch/plugins/Dockerfile +++ b/addons/elasticsearch/plugins/Dockerfile @@ -2,6 +2,24 @@ FROM alpine:3.19.1 WORKDIR /plugins -RUN wget https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v8.8.2/elasticsearch-analysis-ik-8.8.2.zip \ - && unzip elasticsearch-analysis-ik-8.8.2.zip -d ./analysis-ik/ \ - && rm elasticsearch-analysis-ik-8.8.2.zip +# Install all plugins +RUN set -e && \ + # IK plugins for all versions \ + for version in 6.8.23 7.7.1 7.8.1 7.10.1 8.1.3 8.8.2 8.15.5; do \ + echo "Installing IK plugin for Elasticsearch $version"; \ + mkdir -p ./${version}/ik && \ + wget -q https://release.infinilabs.com/analysis-ik/stable/elasticsearch-analysis-ik-${version}.zip && \ + unzip -q elasticsearch-analysis-ik-${version}.zip -d ./${version}/ik/ && \ + rm elasticsearch-analysis-ik-${version}.zip; \ + done && \ + \ + # S3 plugins for 6.x and 7.x (8.x has it built-in) \ + for version in 6.8.23 7.7.1 7.8.1 7.10.1; do \ + echo "Installing S3 plugin for 
Elasticsearch $version"; \ + mkdir -p ./${version}/s3 && \ + wget -q https://artifacts.elastic.co/downloads/elasticsearch-plugins/repository-s3/repository-s3-${version}.zip && \ + unzip -q repository-s3-${version}.zip -d ./${version}/s3/ && \ + rm repository-s3-${version}.zip; \ + done && \ + \ + echo "All plugins installed successfully" diff --git a/addons/elasticsearch/scripts/install-plugins.sh b/addons/elasticsearch/scripts/install-plugins.sh index e3462f147..20d44c797 100644 --- a/addons/elasticsearch/scripts/install-plugins.sh +++ b/addons/elasticsearch/scripts/install-plugins.sh @@ -10,6 +10,15 @@ if [ ! -d $src_plugins_dir ]; then exit 0 fi +# [root@35bde36fc986 elasticsearch]# elasticsearch --version +# Version: 7.10.1, Build: default/docker/1c34507e66d7db1211f66f3513706fdf548736aa/2020-12-05T01:00:33.671820Z, JVM: 15.0.1 +ELASTICSEARCH_VERSION=$(elasticsearch --version | grep "Version:" | awk '{print $2}' | sed 's/,.*//') + +if [ -z "$ELASTICSEARCH_VERSION" ]; then + echo "ELASTICSEARCH_VERSION is not set" + exit 1 +fi + function native_install_plugin() { plugin=$1 msg=`/usr/share/elasticsearch/bin/elasticsearch-plugin install -b $plugin` @@ -36,12 +45,21 @@ function copy_install_plugin() { echo "successfully installed plugin $plugin" } -for plugin in $(ls $src_plugins_dir); do - # check if plugin has suffix .zip or .gz or .tar.gz - echo "installing plugin $plugin" - if [[ $plugin == *.zip || $plugin == *.gz || $plugin == *.tar.gz ]]; then - native_install_plugins $src_plugins_dir/$plugin - else - copy_install_plugin $src_plugins_dir/$plugin - fi -done +# Install version-specific plugins - simply install all plugins that exist for this version +echo "Installing plugins for Elasticsearch version $ELASTICSEARCH_VERSION" + +# Check if version-specific plugin directory exists +if [ -d "$src_plugins_dir/$ELASTICSEARCH_VERSION" ]; then + echo "Found plugin directory for version $ELASTICSEARCH_VERSION" + + # Install all plugin subdirectories that exist + for 
plugin_dir in "$src_plugins_dir/$ELASTICSEARCH_VERSION"/*/; do + if [ -d "$plugin_dir" ]; then + plugin_name=$(basename "$plugin_dir") + echo "Installing $plugin_name plugin for version $ELASTICSEARCH_VERSION" + copy_install_plugin "$plugin_dir" + fi + done +else + echo "No plugin directory found for version $ELASTICSEARCH_VERSION" +fi diff --git a/addons/elasticsearch/templates/_helpers.tpl b/addons/elasticsearch/templates/_helpers.tpl index e256e4592..437509e08 100644 --- a/addons/elasticsearch/templates/_helpers.tpl +++ b/addons/elasticsearch/templates/_helpers.tpl @@ -72,6 +72,14 @@ Define elasticsearch component definition regex pattern ^elasticsearch {{- end -}} +{{- define "elasticsearchMaster.cmpdRegexPattern" -}} +^elasticsearch-master- +{{- end -}} + +{{- define "elasticsearchData.cmpdRegexPattern" -}} +^elasticsearch-data- +{{- end -}} + {{- define "elasticsearch.scriptsTplName" -}} elasticsearch-scripts-tpl {{- end -}} @@ -200,6 +208,44 @@ kibana-7-config-tpl kibana-8-config-tpl {{- end -}} +{{- define "kibana.probe" -}} +exec: + command: + - bash + - -c + - | + #!/usr/bin/env bash -e + + # Disable nss cache to avoid filling dentry cache when calling curl + # This is required with Kibana Docker using nss < 3.52 + export NSS_SDB_USE_CACHE=no + + http () { + local path="${1}" + set -- -XGET -s --fail -L + + if [ -n "${ELASTICSEARCH_USERNAME}" ] && [ -n "${ELASTICSEARCH_PASSWORD}" ]; then + set -- "$@" -u "${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}" + fi + + if [ -n "${KB_TLS_CERT_FILE}" ]; then + READINESS_PROBE_PROTOCOL=https + else + READINESS_PROBE_PROTOCOL=http + fi + endpoint="${READINESS_PROBE_PROTOCOL}://${POD_IP}:5601" + STATUS=$(curl --output /dev/null --write-out "%{http_code}" -k "$@" "${endpoint}${path}") + if [[ "${STATUS}" -eq 200 ]]; then + exit 0 + fi + + echo "Error: Got HTTP code ${STATUS} but expected a 200" + exit 1 + } + + http "/app/kibana" +{{- end -}} + {{- define "elasticsearch.common" }} provider: kubeblocks description: 
Elasticsearch is a distributed, RESTful search engine optimized for speed and relevance on production-scale workloads. @@ -547,4 +593,163 @@ runtime: name: local-plugins - emptyDir: { } name: plugins -{{- end }} \ No newline at end of file +{{- end }} + +{{- define "kibana.common" }} +provider: kubeblocks +description: Kibana is a browser-based analytics and search dashboard for Elasticsearch. +serviceKind: kibana +updateStrategy: Parallel +services: +- name: http + serviceName: http + spec: + ipFamilyPolicy: PreferDualStack + ipFamilies: + - IPv4 + ports: + - name: http + port: 5601 + targetPort: http +vars: +- name: ELASTIC_USER_PASSWORD + valueFrom: + credentialVarRef: + compDef: {{ include "elasticsearch.cmpdRegexPattern" . }} + name: elastic + optional: false + password: Required + multipleClusterObjectOption: + strategy: individual +- name: KIBANA_SYSTEM_USER_PASSWORD + valueFrom: + credentialVarRef: + compDef: {{ include "elasticsearch.cmpdRegexPattern" . }} + name: kibana_system + optional: false + password: Required + multipleClusterObjectOption: + strategy: individual +- name: ELASTICSEARCH_HOST + valueFrom: + serviceVarRef: + compDef: {{ include "elasticsearch.cmpdRegexPattern" . 
}} + name: http + host: Required + multipleClusterObjectOption: + strategy: individual +runtime: + containers: + - env: + - name: NSS_SDB_USE_CACHE + value: "no" + - name: CLUSTER_DOMAIN + value: {{ .Values.clusterDomain | quote }} + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - bash + - -c + - | + function info() { + echo "[$(date +'%Y-%m-%d %H:%M:%S')] $*" + } + if [ -n "${KB_TLS_CERT_FILE}" ]; then + READINESS_PROBE_PROTOCOL=https + else + READINESS_PROBE_PROTOCOL=http + fi + # All the components' password of elastic must be the same, So we find the first environment variable that starts with ELASTIC_USER_PASSWORD + ELASTIC_AUTH_PASSWORD="" + if [ -n "${KB_TLS_CERT_FILE}" ]; then + last_value="" + set +x + for env_var in $(env | grep -E '^ELASTIC_USER_PASSWORD'); do + value="${env_var#*=}" + if [ -n "$value" ]; then + if [ -n "$last_value" ] && [ "$last_value" != "$value" ]; then + echo "Error conflicting env $env_var of elastic password values found, all the components' password of elastic must be the same." + exit 1 + fi + last_value="$value" + fi + done + ELASTIC_AUTH_PASSWORD="$last_value" + fi + for env_var in $(env | grep -E '^ELASTICSEARCH_HOST'); do + value="${env_var#*=}" + if [ -n "$value" ]; then + ELASTICSEARCH_HOST="$value" + break + fi + done + if [ -z "$ELASTICSEARCH_HOST" ]; then + echo "Invalid ELASTICSEARCH_HOST" + exit 1 + fi + endpoint="${READINESS_PROBE_PROTOCOL}://${ELASTICSEARCH_HOST}.${KB_NAMESPACE}.svc.${CLUSTER_DOMAIN}:9200" + common_options="-s -u elastic:${ELASTIC_AUTH_PASSWORD} --fail --connect-timeout 3 -k" + while true; do + if [ -n "${KB_TLS_CERT_FILE}" ]; then + out=$(curl ${common_options} -X GET "${endpoint}/kubeblocks_ca_crt/_doc/1?pretty") + if [ $? 
== 0 ]; then + echo "$out" | grep '"ca.crt" :' | awk -F: '{print $2}' | tr -d '",' | xargs | base64 -d > /tmp/elastic.ca.crt + info "elasticsearch is ready" + break + fi + else + curl ${common_options} -X GET "${endpoint}" + if [ $? == 0 ]; then + info "elasticsearch is ready" + break + fi + fi + info "waiting for elasticsearch to be ready" + sleep 1 + done + if [ -f /bin/tini ]; then + /bin/tini -- /usr/local/bin/kibana-docker -e ${endpoint} -H ${POD_IP} + else + /usr/local/bin/kibana-docker -e ${endpoint} -H ${POD_IP} + fi + name: kibana + ports: + - containerPort: 5601 + name: http + protocol: TCP + startupProbe: + failureThreshold: 5 + initialDelaySeconds: 90 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + {{ include "kibana.probe" . | nindent 6 }} + readinessProbe: + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + {{ include "kibana.probe" . | nindent 6 }} + livenessProbe: + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + {{ include "kibana.probe" . | nindent 6 }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + runAsNonRoot: true + runAsUser: 1000 + volumeMounts: + - mountPath: /usr/share/kibana/config + name: kibana-cm + securityContext: + fsGroup: 1000 +{{- end }} diff --git a/addons/elasticsearch/templates/backuppolicytemplate.yaml b/addons/elasticsearch/templates/backuppolicytemplate.yaml index e93d8bee0..0690ac548 100644 --- a/addons/elasticsearch/templates/backuppolicytemplate.yaml +++ b/addons/elasticsearch/templates/backuppolicytemplate.yaml @@ -9,7 +9,8 @@ spec: clusterDefinitionRef: elasticsearch backupPolicies: - componentDefs: - - elasticsearch + - {{ include "elasticsearchData.cmpdRegexPattern" . 
}} + - ^elasticsearch-\d+ backupMethods: - name: full-backup target: diff --git a/addons/elasticsearch/templates/cmpd-kibana-6.yaml b/addons/elasticsearch/templates/cmpd-kibana-6.yaml index 118d47f20..5a6ac09fd 100644 --- a/addons/elasticsearch/templates/cmpd-kibana-6.yaml +++ b/addons/elasticsearch/templates/cmpd-kibana-6.yaml @@ -7,187 +7,10 @@ metadata: labels: {{- include "elasticsearch.labels" . | nindent 4 }} spec: - provider: kubeblocks - description: Kibana is a browser-based analytics and search dashboard for Elasticsearch. - serviceKind: kibana serviceVersion: 6.8.23 - updateStrategy: Parallel - services: - - name: http - serviceName: http - spec: - ipFamilyPolicy: PreferDualStack - ipFamilies: - - IPv4 - ports: - - name: http - port: 5601 - targetPort: http - vars: - - name: ELASTIC_USER_PASSWORD - valueFrom: - credentialVarRef: - compDef: {{ include "elasticsearch.cmpdRegexPattern" . }} - name: elastic - optional: false - password: Required - multipleClusterObjectOption: - strategy: individual - - name: KIBANA_SYSTEM_USER_PASSWORD - valueFrom: - credentialVarRef: - compDef: {{ include "elasticsearch.cmpdRegexPattern" . }} - name: kibana_system - optional: false - password: Required - multipleClusterObjectOption: - strategy: individual - - name: ELASTICSEARCH_HOST - valueFrom: - serviceVarRef: - compDef: {{ include "elasticsearch.cmpdRegexPattern" . }} - name: http - host: Required - multipleClusterObjectOption: - strategy: individual configs: - name: kibana-cm templateRef: {{ include "kibana6.configTplName" . 
}} namespace: {{ .Release.Namespace }} volumeName: kibana-cm - runtime: - containers: - - env: - - name: NSS_SDB_USE_CACHE - value: "no" - - name: CLUSTER_DOMAIN - value: {{ .Values.clusterDomain | quote }} - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: - - bash - - -c - - | - function info() { - echo "[$(date +'%Y-%m-%d %H:%M:%S')] $*" - } - if [ -n "${KB_TLS_CERT_FILE}" ]; then - READINESS_PROBE_PROTOCOL=https - else - READINESS_PROBE_PROTOCOL=http - fi - # All the components' password of elastic must be the same, So we find the first environment variable that starts with ELASTIC_USER_PASSWORD - ELASTIC_AUTH_PASSWORD="" - if [ "${TLS_ENABLED}" == "true" ]; then - last_value="" - set +x - for env_var in $(env | grep -E '^ELASTIC_USER_PASSWORD'); do - value="${env_var#*=}" - if [ -n "$value" ]; then - if [ -n "$last_value" ] && [ "$last_value" != "$value" ]; then - echo "Error conflicting env $env_var of elastic password values found, all the components' password of elastic must be the same." - exit 1 - fi - last_value="$value" - fi - done - ELASTIC_AUTH_PASSWORD="$last_value" - fi - for env_var in $(env | grep -E '^ELASTICSEARCH_HOST'); do - value="${env_var#*=}" - if [ -n "$value" ]; then - ELASTICSEARCH_HOST="$value" - break - fi - done - if [ -z "$ELASTICSEARCH_HOST" ]; then - echo "Invalid ELASTICSEARCH_HOST" - exit 1 - fi - endpoint="${READINESS_PROBE_PROTOCOL}://${ELASTICSEARCH_HOST}.${KB_NAMESPACE}.svc.${CLUSTER_DOMAIN}:9200" - common_options="-s -u elastic:${ELASTIC_AUTH_PASSWORD} --fail --connect-timeout 3 -k" - while true; do - if [ -n "${KB_TLS_CERT_FILE}" ]; then - out=$(curl ${common_options} -X GET "${endpoint}/kubeblocks_ca_crt/_doc/1?pretty") - if [ $? 
== 0 ]; then - echo "$out" | grep '"ca.crt" :' | awk -F: '{print $2}' | tr -d '",' | xargs | base64 -d > /tmp/elastic.ca.crt - info "elasticsearch is ready" - break - fi - else - curl ${common_options} -X GET "${endpoint}" - if [ $? == 0 ]; then - info "elasticsearch is ready" - break - fi - fi - info "waiting for elasticsearch to be ready" - sleep 1 - done - if [ -f /bin/tini ]; then - /bin/tini -- /usr/local/bin/kibana-docker -e ${endpoint} -H ${POD_IP} - else - /usr/local/bin/kibana-docker -e ${endpoint} -H ${POD_IP} - fi - name: kibana - ports: - - containerPort: 5601 - name: http - protocol: TCP - readinessProbe: - failureThreshold: 3 - initialDelaySeconds: 30 - periodSeconds: 10 - successThreshold: 3 - timeoutSeconds: 5 - exec: - command: - - bash - - -c - - | - #!/usr/bin/env bash -e - - # Disable nss cache to avoid filling dentry cache when calling curl - # This is required with Kibana Docker using nss < 3.52 - export NSS_SDB_USE_CACHE=no - - http () { - local path="${1}" - set -- -XGET -s --fail -L - - if [ -n "${ELASTICSEARCH_USERNAME}" ] && [ -n "${ELASTICSEARCH_PASSWORD}" ]; then - set -- "$@" -u "${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}" - fi - - if [ -n "${KB_TLS_CERT_FILE}" ]; then - READINESS_PROBE_PROTOCOL=https - else - READINESS_PROBE_PROTOCOL=http - fi - endpoint="${READINESS_PROBE_PROTOCOL}://${POD_IP}:5601" - STATUS=$(curl --output /dev/null --write-out "%{http_code}" -k "$@" "${endpoint}${path}") - if [[ "${STATUS}" -eq 200 ]]; then - exit 0 - fi - - echo "Error: Got HTTP code ${STATUS} but expected a 200" - exit 1 - } - - http "/app/kibana" - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - privileged: false - runAsNonRoot: true - runAsUser: 1000 - volumeMounts: - - mountPath: /usr/share/kibana/config - name: kibana-cm - securityContext: - fsGroup: 1000 \ No newline at end of file + {{ include "kibana.common" . 
| nindent 2 }} \ No newline at end of file diff --git a/addons/elasticsearch/templates/cmpd-kibana-7.yaml b/addons/elasticsearch/templates/cmpd-kibana-7.yaml index 67dcd5e05..73a0a2cf4 100644 --- a/addons/elasticsearch/templates/cmpd-kibana-7.yaml +++ b/addons/elasticsearch/templates/cmpd-kibana-7.yaml @@ -7,187 +7,10 @@ metadata: labels: {{- include "elasticsearch.labels" . | nindent 4 }} spec: - provider: kubeblocks - description: Kibana is a browser-based analytics and search dashboard for Elasticsearch. - serviceKind: kibana serviceVersion: 7.10.1 - updateStrategy: Parallel - services: - - name: http - serviceName: http - spec: - ipFamilyPolicy: PreferDualStack - ipFamilies: - - IPv4 - ports: - - name: http - port: 5601 - targetPort: http - vars: - - name: ELASTIC_USER_PASSWORD - valueFrom: - credentialVarRef: - compDef: {{ include "elasticsearch.cmpdRegexPattern" . }} - name: elastic - optional: false - password: Required - multipleClusterObjectOption: - strategy: individual - - name: KIBANA_SYSTEM_USER_PASSWORD - valueFrom: - credentialVarRef: - compDef: {{ include "elasticsearch.cmpdRegexPattern" . }} - name: kibana_system - optional: false - password: Required - multipleClusterObjectOption: - strategy: individual - - name: ELASTICSEARCH_HOST - valueFrom: - serviceVarRef: - compDef: {{ include "elasticsearch.cmpdRegexPattern" . }} - name: http - host: Required - multipleClusterObjectOption: - strategy: individual configs: - name: kibana-cm templateRef: {{ include "kibana7.configTplName" . 
}} namespace: {{ .Release.Namespace }} volumeName: kibana-cm - runtime: - containers: - - env: - - name: NSS_SDB_USE_CACHE - value: "no" - - name: CLUSTER_DOMAIN - value: {{ .Values.clusterDomain | quote }} - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: - - bash - - -c - - | - function info() { - echo "[$(date +'%Y-%m-%d %H:%M:%S')] $*" - } - if [ -n "${KB_TLS_CERT_FILE}" ]; then - READINESS_PROBE_PROTOCOL=https - else - READINESS_PROBE_PROTOCOL=http - fi - # All the components' password of elastic must be the same, So we find the first environment variable that starts with ELASTIC_USER_PASSWORD - ELASTIC_AUTH_PASSWORD="" - if [ "${TLS_ENABLED}" == "true" ]; then - last_value="" - set +x - for env_var in $(env | grep -E '^ELASTIC_USER_PASSWORD'); do - value="${env_var#*=}" - if [ -n "$value" ]; then - if [ -n "$last_value" ] && [ "$last_value" != "$value" ]; then - echo "Error conflicting env $env_var of elastic password values found, all the components' password of elastic must be the same." - exit 1 - fi - last_value="$value" - fi - done - ELASTIC_AUTH_PASSWORD="$last_value" - fi - for env_var in $(env | grep -E '^ELASTICSEARCH_HOST'); do - value="${env_var#*=}" - if [ -n "$value" ]; then - ELASTICSEARCH_HOST="$value" - break - fi - done - if [ -z "$ELASTICSEARCH_HOST" ]; then - echo "Invalid ELASTICSEARCH_HOST" - exit 1 - fi - endpoint="${READINESS_PROBE_PROTOCOL}://${ELASTICSEARCH_HOST}.${KB_NAMESPACE}.svc.${CLUSTER_DOMAIN}:9200" - common_options="-s -u elastic:${ELASTIC_AUTH_PASSWORD} --fail --connect-timeout 3 -k" - while true; do - if [ -n "${KB_TLS_CERT_FILE}" ]; then - out=$(curl ${common_options} -X GET "${endpoint}/kubeblocks_ca_crt/_doc/1?pretty") - if [ $? 
== 0 ]; then - echo "$out" | grep '"ca.crt" :' | awk -F: '{print $2}' | tr -d '",' | xargs | base64 -d > /tmp/elastic.ca.crt - info "elasticsearch is ready" - break - fi - else - curl ${common_options} -X GET "${endpoint}" - if [ $? == 0 ]; then - info "elasticsearch is ready" - break - fi - fi - info "waiting for elasticsearch to be ready" - sleep 1 - done - if [ -f /bin/tini ]; then - /bin/tini -- /usr/local/bin/kibana-docker -e ${endpoint} -H ${POD_IP} - else - /usr/local/bin/kibana-docker -e ${endpoint} -H ${POD_IP} - fi - name: kibana - ports: - - containerPort: 5601 - name: http - protocol: TCP - readinessProbe: - failureThreshold: 3 - initialDelaySeconds: 30 - periodSeconds: 10 - successThreshold: 3 - timeoutSeconds: 5 - exec: - command: - - bash - - -c - - | - #!/usr/bin/env bash -e - - # Disable nss cache to avoid filling dentry cache when calling curl - # This is required with Kibana Docker using nss < 3.52 - export NSS_SDB_USE_CACHE=no - - http () { - local path="${1}" - set -- -XGET -s --fail -L - - if [ -n "${ELASTICSEARCH_USERNAME}" ] && [ -n "${ELASTICSEARCH_PASSWORD}" ]; then - set -- "$@" -u "${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}" - fi - - if [ -n "${KB_TLS_CERT_FILE}" ]; then - READINESS_PROBE_PROTOCOL=https - else - READINESS_PROBE_PROTOCOL=http - fi - endpoint="${READINESS_PROBE_PROTOCOL}://${POD_IP}:5601" - STATUS=$(curl --output /dev/null --write-out "%{http_code}" -k "$@" "${endpoint}${path}") - if [[ "${STATUS}" -eq 200 ]]; then - exit 0 - fi - - echo "Error: Got HTTP code ${STATUS} but expected a 200" - exit 1 - } - - http "/app/kibana" - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - privileged: false - runAsNonRoot: true - runAsUser: 1000 - volumeMounts: - - mountPath: /usr/share/kibana/config - name: kibana-cm - securityContext: - fsGroup: 1000 \ No newline at end of file + {{ include "kibana.common" . 
| nindent 2 }} \ No newline at end of file diff --git a/addons/elasticsearch/templates/cmpd-kibana-8.yaml b/addons/elasticsearch/templates/cmpd-kibana-8.yaml index 51eda23da..8fbc2d867 100644 --- a/addons/elasticsearch/templates/cmpd-kibana-8.yaml +++ b/addons/elasticsearch/templates/cmpd-kibana-8.yaml @@ -7,187 +7,10 @@ metadata: labels: {{- include "elasticsearch.labels" . | nindent 4 }} spec: - provider: kubeblocks - description: Kibana is a browser-based analytics and search dashboard for Elasticsearch. - serviceKind: kibana serviceVersion: 8.8.2 - updateStrategy: Parallel - services: - - name: http - serviceName: http - spec: - ipFamilyPolicy: PreferDualStack - ipFamilies: - - IPv4 - ports: - - name: http - port: 5601 - targetPort: http - vars: - - name: ELASTIC_USER_PASSWORD - valueFrom: - credentialVarRef: - compDef: {{ include "elasticsearch.cmpdRegexPattern" . }} - name: elastic - optional: false - password: Required - multipleClusterObjectOption: - strategy: individual - - name: KIBANA_SYSTEM_USER_PASSWORD - valueFrom: - credentialVarRef: - compDef: {{ include "elasticsearch.cmpdRegexPattern" . }} - name: kibana_system - optional: false - password: Required - multipleClusterObjectOption: - strategy: individual - - name: ELASTICSEARCH_HOST - valueFrom: - serviceVarRef: - compDef: {{ include "elasticsearch.cmpdRegexPattern" . }} - name: http - host: Required - multipleClusterObjectOption: - strategy: individual configs: - name: kibana-cm templateRef: {{ include "kibana8.configTplName" . 
}} namespace: {{ .Release.Namespace }} volumeName: kibana-cm - runtime: - containers: - - env: - - name: NSS_SDB_USE_CACHE - value: "no" - - name: CLUSTER_DOMAIN - value: {{ .Values.clusterDomain | quote }} - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: - - bash - - -c - - | - function info() { - echo "[$(date +'%Y-%m-%d %H:%M:%S')] $*" - } - if [ -n "${KB_TLS_CERT_FILE}" ]; then - READINESS_PROBE_PROTOCOL=https - else - READINESS_PROBE_PROTOCOL=http - fi - # All the components' password of elastic must be the same, So we find the first environment variable that starts with ELASTIC_USER_PASSWORD - ELASTIC_AUTH_PASSWORD="" - if [ "${TLS_ENABLED}" == "true" ]; then - last_value="" - set +x - for env_var in $(env | grep -E '^ELASTIC_USER_PASSWORD'); do - value="${env_var#*=}" - if [ -n "$value" ]; then - if [ -n "$last_value" ] && [ "$last_value" != "$value" ]; then - echo "Error conflicting env $env_var of elastic password values found, all the components' password of elastic must be the same." - exit 1 - fi - last_value="$value" - fi - done - ELASTIC_AUTH_PASSWORD="$last_value" - fi - for env_var in $(env | grep -E '^ELASTICSEARCH_HOST'); do - value="${env_var#*=}" - if [ -n "$value" ]; then - ELASTICSEARCH_HOST="$value" - break - fi - done - if [ -z "$ELASTICSEARCH_HOST" ]; then - echo "Invalid ELASTICSEARCH_HOST" - exit 1 - fi - endpoint="${READINESS_PROBE_PROTOCOL}://${ELASTICSEARCH_HOST}.${KB_NAMESPACE}.svc.${CLUSTER_DOMAIN}:9200" - common_options="-s -u elastic:${ELASTIC_AUTH_PASSWORD} --fail --connect-timeout 3 -k" - while true; do - if [ -n "${KB_TLS_CERT_FILE}" ]; then - out=$(curl ${common_options} -X GET "${endpoint}/kubeblocks_ca_crt/_doc/1?pretty") - if [ $? 
== 0 ]; then - echo "$out" | grep '"ca.crt" :' | awk -F: '{print $2}' | tr -d '",' | xargs | base64 -d > /tmp/elastic.ca.crt - info "elasticsearch is ready" - break - fi - else - curl ${common_options} -X GET "${endpoint}" - if [ $? == 0 ]; then - info "elasticsearch is ready" - break - fi - fi - info "waiting for elasticsearch to be ready" - sleep 1 - done - if [ -f /bin/tini ]; then - /bin/tini -- /usr/local/bin/kibana-docker -e ${endpoint} -H ${POD_IP} - else - /usr/local/bin/kibana-docker -e ${endpoint} -H ${POD_IP} - fi - name: kibana - ports: - - containerPort: 5601 - name: http - protocol: TCP - readinessProbe: - failureThreshold: 3 - initialDelaySeconds: 30 - periodSeconds: 10 - successThreshold: 3 - timeoutSeconds: 5 - exec: - command: - - bash - - -c - - | - #!/usr/bin/env bash -e - - # Disable nss cache to avoid filling dentry cache when calling curl - # This is required with Kibana Docker using nss < 3.52 - export NSS_SDB_USE_CACHE=no - - http () { - local path="${1}" - set -- -XGET -s --fail -L - - if [ -n "${ELASTICSEARCH_USERNAME}" ] && [ -n "${ELASTICSEARCH_PASSWORD}" ]; then - set -- "$@" -u "${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}" - fi - - if [ -n "${KB_TLS_CERT_FILE}" ]; then - READINESS_PROBE_PROTOCOL=https - else - READINESS_PROBE_PROTOCOL=http - fi - endpoint="${READINESS_PROBE_PROTOCOL}://${POD_IP}:5601" - STATUS=$(curl --output /dev/null --write-out "%{http_code}" -k "$@" "${endpoint}${path}") - if [[ "${STATUS}" -eq 200 ]]; then - exit 0 - fi - - echo "Error: Got HTTP code ${STATUS} but expected a 200" - exit 1 - } - - http "/app/kibana" - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - privileged: false - runAsNonRoot: true - runAsUser: 1000 - volumeMounts: - - mountPath: /usr/share/kibana/config - name: kibana-cm - securityContext: - fsGroup: 1000 \ No newline at end of file + {{ include "kibana.common" . 
| nindent 2 }} \ No newline at end of file diff --git a/addons/elasticsearch/templates/cmpv-es.yaml b/addons/elasticsearch/templates/cmpv-es.yaml index 08d7a7c1d..e1ccab33d 100644 --- a/addons/elasticsearch/templates/cmpv-es.yaml +++ b/addons/elasticsearch/templates/cmpv-es.yaml @@ -25,11 +25,7 @@ spec: serviceVersion: {{ index . 1 }} images: elasticsearch: {{ $imageRegistry }}/{{ $.Values.image.repository }}:{{ index . 2 }} -{{- if eq $.Values.image.plugin.tag (index . 2) }} prepare-plugins: {{ $imageRegistry }}/{{ $.Values.image.plugin.repository }}:{{ $.Values.image.plugin.tag }} -{{- else }} - prepare-plugins: {{ $imageRegistry }}/{{ $.Values.image.repository }}:{{ index . 2 }} -{{- end }} install-plugins: {{ $imageRegistry }}/{{ $.Values.image.repository }}:{{ index . 2 }} exporter: {{ $imageRegistry }}/{{ $.Values.image.exporter.repository }}:{{ $.Values.image.exporter.tag }} install-es-agent: {{ $imageRegistry }}/{{ $.Values.image.agent.repository }}:{{ $.Values.image.agent.tag }} diff --git a/addons/elasticsearch/values.yaml b/addons/elasticsearch/values.yaml index f0c004881..14206680f 100644 --- a/addons/elasticsearch/values.yaml +++ b/addons/elasticsearch/values.yaml @@ -17,7 +17,7 @@ image: tag: "v1.7.0" plugin: repository: apecloud/elasticsearch-plugins - tag: "8.8.2" + tag: "0.1.0" tools: repository: apecloud/curl-jq tag: "0.1.0"