diff --git a/.github/workflows/build_branch.yaml b/.github/workflows/build_branch.yaml
index ae218a733..8d7ef1f10 100644
--- a/.github/workflows/build_branch.yaml
+++ b/.github/workflows/build_branch.yaml
@@ -23,7 +23,8 @@ jobs:
DOCKER_PASS: ${{ secrets.DOCKER_PASS }}
run: |
export CHO_RELEASE=$(cat release)
+ export GO_VERSION=$(grep '^go ' go.mod | awk '{print $2}')
echo "${DOCKER_PASS}" | docker login -u $DOCKER_USER --password-stdin docker.io
- docker buildx build --progress plain --platform=linux/amd64,linux/arm64 -f dockerfile/operator/Dockerfile -t docker.io/${DOCKER_ORG}/clickhouse-operator:${CHO_RELEASE} --pull --push .
- docker buildx build --progress plain --platform=linux/amd64,linux/arm64 -f dockerfile/metrics-exporter/Dockerfile -t docker.io/${DOCKER_ORG}/metrics-exporter:${CHO_RELEASE} --pull --push .
+ docker buildx build --progress plain --platform=linux/amd64,linux/arm64 -f dockerfile/operator/Dockerfile --build-arg GO_VERSION=${GO_VERSION} -t docker.io/${DOCKER_ORG}/clickhouse-operator:${CHO_RELEASE} --pull --push .
+ docker buildx build --progress plain --platform=linux/amd64,linux/arm64 -f dockerfile/metrics-exporter/Dockerfile --build-arg GO_VERSION=${GO_VERSION} -t docker.io/${DOCKER_ORG}/metrics-exporter:${CHO_RELEASE} --pull --push .
diff --git a/.github/workflows/build_master.yaml b/.github/workflows/build_master.yaml
index 812b8e331..895225adb 100644
--- a/.github/workflows/build_master.yaml
+++ b/.github/workflows/build_master.yaml
@@ -23,8 +23,8 @@ jobs:
DOCKER_PASS: ${{ secrets.DOCKER_PASS }}
run: |
export CHO_RELEASE=latest
+ export GO_VERSION=$(grep '^go ' go.mod | awk '{print $2}')
echo "${DOCKER_PASS}" | docker login -u $DOCKER_USER --password-stdin docker.io
- docker buildx build --progress plain --platform=linux/amd64,linux/arm64 -f dockerfile/operator/Dockerfile -t docker.io/${DOCKER_ORG}/clickhouse-operator:${CHO_RELEASE} --pull --push .
- docker buildx build --progress plain --platform=linux/amd64,linux/arm64 -f dockerfile/metrics-exporter/Dockerfile -t docker.io/${DOCKER_ORG}/metrics-exporter:${CHO_RELEASE} --pull --push .
-
+ docker buildx build --progress plain --platform=linux/amd64,linux/arm64 -f dockerfile/operator/Dockerfile --build-arg GO_VERSION=${GO_VERSION} -t docker.io/${DOCKER_ORG}/clickhouse-operator:${CHO_RELEASE} --pull --push .
+ docker buildx build --progress plain --platform=linux/amd64,linux/arm64 -f dockerfile/metrics-exporter/Dockerfile --build-arg GO_VERSION=${GO_VERSION} -t docker.io/${DOCKER_ORG}/metrics-exporter:${CHO_RELEASE} --pull --push .
diff --git a/.github/workflows/release_chart.yaml b/.github/workflows/release_chart.yaml
index 59835d4c8..c86a0b37d 100644
--- a/.github/workflows/release_chart.yaml
+++ b/.github/workflows/release_chart.yaml
@@ -28,13 +28,6 @@ jobs:
- name: Package Chart
run: cr package deploy/helm/clickhouse-operator
- - name: Install Helm
- run: |
- curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
-
- - name: Login to GitHub Container Registry
- run: echo "${{ secrets.GITHUB_TOKEN }}" | helm registry login ghcr.io -u ${{ github.actor }} --password-stdin
-
- name: Get Release Assets
id: get_assets
run: |
@@ -62,13 +55,38 @@ jobs:
-H "Content-Type: application/gzip" \
-T "${CHART_PATH}" \
"https://uploads.github.com/repos/${GITHUB_REPOSITORY}/releases/${{ github.event.release.id }}/assets?name=$(basename ${CHART_PATH})"
-
+ - name: Validate Helm Repository Configuration
+ run: |
+ if [ -z "${{ secrets.HELM_GITHUB_TOKEN }}" ]; then
+ echo "ERROR: HELM_GITHUB_TOKEN secret is not set or is empty"
+ echo "Please add HELM_GITHUB_TOKEN to repository secrets with write access to the helm repository"
+ exit 1
+ fi
+
+ if [ -z "${{ vars.HELM_GITHUB_REPOSITORY }}" ]; then
+ echo "ERROR: HELM_GITHUB_REPOSITORY variable is not set or is empty"
+ echo "Please add HELM_GITHUB_REPOSITORY to repository variables (Settings -> Secrets and variables -> Actions -> Variables)"
+ exit 1
+ fi
+
+ echo "Configuration validated:"
+ echo " HELM_GITHUB_REPOSITORY: ${{ vars.HELM_GITHUB_REPOSITORY }}"
+ echo " HELM_GITHUB_TOKEN: [SET]"
+
+ - name: Upload Release Artifacts to Helm Repo
+ run: |
+ cr upload \
+ --git-repo=${{ vars.HELM_GITHUB_REPOSITORY }} \
+ --owner=${GITHUB_REPOSITORY_OWNER} \
+            --release-name-template="${{ github.event.release.name }}" \
+ --token=${{ secrets.HELM_GITHUB_TOKEN }} \
+ --package-path=.cr-release-packages \
+ --skip-existing
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
-
- - name: Release Chart
+ - name: Release Chart to Operator Repo
run: |
git remote add httpsorigin "https://github.com/${GITHUB_REPOSITORY}.git"
git fetch httpsorigin
@@ -80,8 +98,86 @@ jobs:
--index-path=index.yaml \
--remote=httpsorigin \
--push
-
- - name: Push Helm Chart to OCI Registry
+ - name: Release Chart to Helm Repo
run: |
- CHART_PATH=$(ls .cr-release-packages/altinity-clickhouse-operator-*.tgz)
- helm push "${CHART_PATH}" oci://ghcr.io/altinity/clickhouse-operator-helm-chart
+ # Validate configuration before attempting to push
+ if [ -z "${{ vars.HELM_GITHUB_REPOSITORY }}" ]; then
+ echo "ERROR: HELM_GITHUB_REPOSITORY variable is not set or is empty"
+ echo "This step requires HELM_GITHUB_REPOSITORY to be set in repository variables"
+ echo "Go to: Settings -> Secrets and variables -> Actions -> Variables"
+ exit 1
+ fi
+
+ if [ -z "${{ secrets.HELM_GITHUB_TOKEN }}" ]; then
+ echo "ERROR: HELM_GITHUB_TOKEN secret is not set or is empty"
+ echo "This step requires HELM_GITHUB_TOKEN with write access to: ${GITHUB_REPOSITORY_OWNER}/${{ vars.HELM_GITHUB_REPOSITORY }}"
+ echo "Go to: Settings -> Secrets and variables -> Actions -> Secrets"
+ exit 1
+ fi
+
+ echo "Attempting to push to helm repository: ${GITHUB_REPOSITORY_OWNER}/${{ vars.HELM_GITHUB_REPOSITORY }}"
+
+ # Test token authentication
+ echo "Testing token authentication..."
+ TOKEN_USER=$(curl -sS -H "Authorization: token ${{ secrets.HELM_GITHUB_TOKEN }}" https://api.github.com/user | jq -r '.login')
+ echo "Token authenticated as user: ${TOKEN_USER}"
+
+ # Save current directory
+ WORK_DIR=$(pwd)
+
+ # Create a temporary directory for helm repo operations
+ TEMP_DIR=$(mktemp -d)
+ cd "$TEMP_DIR"
+
+ # Clone the helm repository WITHOUT token in URL to avoid masking issues
+ echo "Cloning helm repository to temporary directory..."
+ git clone https://github.com/${GITHUB_REPOSITORY_OWNER}/${{ vars.HELM_GITHUB_REPOSITORY }}.git helm-repo || {
+ echo "ERROR: Failed to clone helm repository"
+ echo "Please verify:"
+ echo " 1. Repository exists: ${GITHUB_REPOSITORY_OWNER}/${{ vars.HELM_GITHUB_REPOSITORY }}"
+ exit 1
+ }
+
+ cd helm-repo
+
+ # Configure git credentials for push
+ git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
+ git config user.name "$GITHUB_ACTOR"
+
+ # Set up authentication using git credential helper
+ git config credential.helper "store --file=.git/credentials"
+ echo "https://x-access-token:${{ secrets.HELM_GITHUB_TOKEN }}@github.com" > .git/credentials
+
+ # Now use cr index from within the helm repo to avoid history conflicts
+ echo "Generating index.yaml within helm repository context..."
+
+ # Copy the package to a local directory within helm repo
+ mkdir -p .cr-release-packages
+ cp "$WORK_DIR"/.cr-release-packages/*.tgz .cr-release-packages/ || {
+ echo "ERROR: No chart packages found in .cr-release-packages"
+ exit 1
+ }
+
+ # Generate index with cr (this will handle the gh-pages branch automatically)
+ cr index \
+ --git-repo=${{ vars.HELM_GITHUB_REPOSITORY }} \
+ --owner=${GITHUB_REPOSITORY_OWNER} \
+            --release-name-template="${{ github.event.release.name }}" \
+ --token=${{ secrets.HELM_GITHUB_TOKEN }} \
+ --package-path=.cr-release-packages \
+ --index-path=index.yaml \
+ --push || {
+ echo "ERROR: Failed to generate or push index to helm repository"
+ echo "Debug: Current directory is $(pwd)"
+ echo "Debug: Git remotes:"
+ git remote -v
+ echo "Debug: Git status:"
+ git status
+ exit 1
+ }
+
+ echo "Successfully updated helm repository index"
+
+ # Cleanup
+ cd /
+ rm -rf "$TEMP_DIR"
diff --git a/.github/workflows/run_tests.yaml b/.github/workflows/run_tests.yaml
index dd938c311..b148a7326 100644
--- a/.github/workflows/run_tests.yaml
+++ b/.github/workflows/run_tests.yaml
@@ -55,13 +55,15 @@ jobs:
run: |
minikube status
export CHO_RELEASE=$(cat release)
+ export GO_VERSION=$(grep '^go ' go.mod | awk '{print $2}')
echo "current release=$CHO_RELEASE"
+ echo "current go version=$GO_VERSION"
- docker build -f dockerfile/operator/Dockerfile -t altinity/clickhouse-operator:${CHO_RELEASE} --pull .
- docker build -f dockerfile/metrics-exporter/Dockerfile -t altinity/metrics-exporter:${CHO_RELEASE} --pull .
+ docker build -f dockerfile/operator/Dockerfile --build-arg GO_VERSION=${GO_VERSION} -t altinity/clickhouse-operator:${CHO_RELEASE} --pull .
+ docker build -f dockerfile/metrics-exporter/Dockerfile --build-arg GO_VERSION=${GO_VERSION} -t altinity/metrics-exporter:${CHO_RELEASE} --pull .
docker image save altinity/clickhouse-operator:${CHO_RELEASE} -o operator.tar
- docker image save altinity/metrics-exporter:${CHO_RELEASE} -o metrics-exporter.tar
+ docker image save altinity/metrics-exporter:${CHO_RELEASE} -o metrics-exporter.tar
minikube image load operator.tar
minikube image load metrics-exporter.tar
diff --git a/cmd/operator/app/thread_chi.go b/cmd/operator/app/thread_chi.go
index 2cd3286e5..333c96d4d 100644
--- a/cmd/operator/app/thread_chi.go
+++ b/cmd/operator/app/thread_chi.go
@@ -63,6 +63,13 @@ func initClickHouse(ctx context.Context) {
log.V(1).F().Info("Config parsed:")
log.Info("\n" + chop.Config().String(true))
+ // Log namespace deny list configuration
+ if chop.Config().Watch.Namespaces.Exclude.Len() > 0 {
+ log.Info("Namespace deny list configured: %v - these namespaces will NOT be reconciled", chop.Config().Watch.Namespaces.Exclude.Value())
+ } else {
+ log.V(1).Info("No namespace deny list configured - all watched namespaces will be reconciled")
+ }
+
// Create Informers
kubeInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(
kubeClient,
diff --git a/cmd/operator/app/thread_keeper.go b/cmd/operator/app/thread_keeper.go
index 19614d84d..e5e707117 100644
--- a/cmd/operator/app/thread_keeper.go
+++ b/cmd/operator/app/thread_keeper.go
@@ -93,6 +93,12 @@ func keeperPredicate() predicate.Funcs {
return false
}
+ // Check if namespace should be watched (includes deny list check)
+ if !chop.Config().IsNamespaceWatched(obj.Namespace) {
+ logger.V(2).Info("chkInformer: skip event, namespace is not watched or is in deny list", "namespace", obj.Namespace)
+ return false
+ }
+
if obj.Spec.Suspend.Value() {
return false
}
@@ -107,6 +113,12 @@ func keeperPredicate() predicate.Funcs {
return false
}
+ // Check if namespace should be watched (includes deny list check)
+ if !chop.Config().IsNamespaceWatched(obj.Namespace) {
+ logger.V(2).Info("chkInformer: skip event, namespace is not watched or is in deny list", "namespace", obj.Namespace)
+ return false
+ }
+
if obj.Spec.Suspend.Value() {
return false
}
diff --git a/config/config-dev.yaml b/config/config-dev.yaml
index b16340ea9..7720cb9d2 100644
--- a/config/config-dev.yaml
+++ b/config/config-dev.yaml
@@ -1,17 +1,17 @@
+# IMPORTANT
+# This file is auto-generated
+# Do not edit this file - all changes would be lost
+# Edit appropriate template in the following folder:
+# deploy/builder/templates-config
+# IMPORTANT
#
-#
-#
-#
-#
-#
-#
-#
-#
-#
-#
-#
-#
-#
+# Template parameters available:
+# WATCH_NAMESPACES=
+# CH_USERNAME_PLAIN=
+# CH_PASSWORD_PLAIN=
+# CH_CREDENTIALS_SECRET_NAMESPACE=
+# CH_CREDENTIALS_SECRET_NAME=clickhouse-operator
+# VERBOSITY=1
################################################
##
@@ -23,8 +23,9 @@ watch:
# Concurrently running operators should watch on different namespaces.
# IMPORTANT
# Regexp is applicable.
- #namespaces: ["dev", "test"]
- namespaces: [dev, test]
+ namespaces:
+ include: [dev, test]
+ exclude: []
clickhouse:
configuration:
@@ -276,8 +277,8 @@ template:
chi:
# CHI template updates handling policy
# Possible policy values:
- # - ReadOnStart. Accept CHIT updates on the operators start only.
- # - ApplyOnNextReconcile. Accept CHIT updates at all time. Apply news CHITs on next regular reconcile of the CHI
+ # - ReadOnStart. Accept CHIT updates on the operator's start only.
+ # - ApplyOnNextReconcile. Accept CHIT updates at all time. Apply new CHITs on next regular reconcile of the CHI
policy: ApplyOnNextReconcile
# Path to the folder where ClickHouseInstallation templates .yaml manifests are located.
@@ -288,7 +289,7 @@ template:
# CHK template updates handling policy
# Possible policy values:
# - ReadOnStart. Accept CHIT updates on the operators start only.
- # - ApplyOnNextReconcile. Accept CHIT updates at all time. Apply news CHITs on next regular reconcile of the CHI
+ # - ApplyOnNextReconcile. Accept CHIT updates at all time. Apply new CHITs on next regular reconcile of the CHI
policy: ApplyOnNextReconcile
# Path to the folder where ClickHouseInstallation templates .yaml manifests are located.
@@ -315,9 +316,9 @@ reconcile:
# 3. The first shard is always reconciled alone. Concurrency starts from the second shard and onward.
# Thus limiting number of shards being reconciled (and thus having hosts down) in each CHI by both number and percentage
- # Max number of concurrent shard reconciles within one CHI in progress
+ # Max number of concurrent shard reconciles within one cluster in progress
reconcileShardsThreadsNumber: 1
- # Max percentage of concurrent shard reconciles within one CHI in progress
+ # Max percentage of concurrent shard reconciles within one cluster in progress
reconcileShardsMaxConcurrencyPercent: 50
# Reconcile StatefulSet scenario
@@ -356,15 +357,31 @@ reconcile:
# - to be excluded from a ClickHouse cluster
# - to complete all running queries
# - to be included into a ClickHouse cluster
- # respectfully before moving forward
+    # respectively before moving forward with host reconcile
wait:
exclude: true
queries: true
include: false
+ # The operator during reconcile procedure should wait for replicas to catch-up
+ # replication delay a.k.a replication lag for the following replicas
replicas:
+ # All replicas (new and known earlier) are explicitly requested to wait for replication to catch-up
all: no
+ # New replicas only are requested to wait for replication to catch-up
new: yes
+ # Replication catch-up is considered to be completed as soon as replication delay
+ # a.k.a replication lag - calculated as "MAX(absolute_delay) FROM system.replicas"
+ # is within this specified delay (in seconds)
delay: 10
+ probes:
+ # Whether the operator during host launch procedure should wait for startup probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+        # Default option value is not to wait.
+ startup: no
+ # Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+ # Default option value is to wait.
+ readiness: yes
################################################
##
diff --git a/config/config.yaml b/config/config.yaml
index 2b4bbce1b..ce6219a5a 100644
--- a/config/config.yaml
+++ b/config/config.yaml
@@ -23,7 +23,6 @@ watch:
# Concurrently running operators should watch on different namespaces.
# IMPORTANT
# Regexp is applicable.
- #namespaces: ["dev", "test"]
namespaces: []
clickhouse:
@@ -356,15 +355,31 @@ reconcile:
# - to be excluded from a ClickHouse cluster
# - to complete all running queries
# - to be included into a ClickHouse cluster
- # respectfully before moving forward
+    # respectively before moving forward with host reconcile
wait:
exclude: true
queries: true
include: false
+ # The operator during reconcile procedure should wait for replicas to catch-up
+ # replication delay a.k.a replication lag for the following replicas
replicas:
+ # All replicas (new and known earlier) are explicitly requested to wait for replication to catch-up
all: no
+ # New replicas only are requested to wait for replication to catch-up
new: yes
+ # Replication catch-up is considered to be completed as soon as replication delay
+ # a.k.a replication lag - calculated as "MAX(absolute_delay) FROM system.replicas"
+ # is within this specified delay (in seconds)
delay: 10
+ probes:
+ # Whether the operator during host launch procedure should wait for startup probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+        # Default option value is not to wait.
+ startup: no
+ # Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+ # Default option value is to wait.
+ readiness: yes
################################################
##
diff --git a/deploy/builder/templates-config/config.yaml b/deploy/builder/templates-config/config.yaml
index 25d4eacbd..0679d8782 100644
--- a/deploy/builder/templates-config/config.yaml
+++ b/deploy/builder/templates-config/config.yaml
@@ -17,7 +17,6 @@ watch:
# Concurrently running operators should watch on different namespaces.
# IMPORTANT
# Regexp is applicable.
- #namespaces: ["dev", "test"]
namespaces: [${WATCH_NAMESPACES}]
clickhouse:
@@ -350,15 +349,31 @@ reconcile:
# - to be excluded from a ClickHouse cluster
# - to complete all running queries
# - to be included into a ClickHouse cluster
- # respectfully before moving forward
+    # respectively before moving forward with host reconcile
wait:
exclude: true
queries: true
include: false
+ # The operator during reconcile procedure should wait for replicas to catch-up
+ # replication delay a.k.a replication lag for the following replicas
replicas:
+ # All replicas (new and known earlier) are explicitly requested to wait for replication to catch-up
all: no
+ # New replicas only are requested to wait for replication to catch-up
new: yes
+ # Replication catch-up is considered to be completed as soon as replication delay
+ # a.k.a replication lag - calculated as "MAX(absolute_delay) FROM system.replicas"
+ # is within this specified delay (in seconds)
delay: 10
+ probes:
+ # Whether the operator during host launch procedure should wait for startup probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+        # Default option value is not to wait.
+ startup: no
+ # Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+ # Default option value is to wait.
+ readiness: yes
################################################
##
diff --git a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-01-chi-chit.yaml b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-01-chi-chit.yaml
index ad7327eee..6995906da 100644
--- a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-01-chi-chit.yaml
+++ b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-01-chi-chit.yaml
@@ -358,9 +358,9 @@ spec:
description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
# nullable: true
x-kubernetes-preserve-unknown-fields: true
- reconciling:
+ reconciling: &TypeReconcile
type: object
- description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
# nullable: true
properties:
policy:
@@ -433,20 +433,6 @@ spec:
service:
<<: *TypeObjectsCleanup
description: "Behavior policy for failed Service, `Retain` by default"
- runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
macros:
type: object
description: "macros parameters"
@@ -490,6 +476,72 @@ spec:
enabled:
<<: *TypeStringBool
description: "enabled or not"
+ runtime: &TypeReconcileRuntime
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host: &TypeReconcileHost
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+                  respectively before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ <<: *TypeStringBool
+ queries:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait during host launch procedure"
+ properties:
+ startup:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+                              Default option value is not to wait.
+ readiness:
+ <<: *TypeStringBool
+ description: |
+                              Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ reconcile:
+ <<: *TypeReconcile
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
defaults:
type: object
description: |
@@ -620,6 +672,9 @@ spec:
identity:
type: string
description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+              <<: *TypeStringBool
+ description: "Enables compression in Keeper protocol if set to true"
users:
type: object
description: |
@@ -821,19 +876,9 @@ spec:
description: "allow tuning reconciling process"
properties:
runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ <<: *TypeReconcileRuntime
+ host:
+ <<: *TypeReconcileHost
layout:
type: object
description: |
@@ -1120,7 +1165,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1376,7 +1421,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-02-chopconf.yaml b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-02-chopconf.yaml
index bd03da722..3db24c5f5 100644
--- a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-02-chopconf.yaml
+++ b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-02-chopconf.yaml
@@ -53,10 +53,9 @@ spec:
description: "Parameters for watch kubernetes resources which used by clickhouse-operator deployment"
properties:
namespaces:
- type: array
+ type: object
description: "List of namespaces where clickhouse-operator watches for events."
- items:
- type: string
+ x-kubernetes-preserve-unknown-fields: true
clickhouse:
type: object
description: "Clickhouse related parameters used by clickhouse-operator"
@@ -327,7 +326,6 @@ spec:
- to complete all running queries
- to be included into a ClickHouse cluster
respectfully before moving forward
-
properties:
wait:
type: object
@@ -379,6 +377,22 @@ spec:
delay:
type: integer
description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait during host launch procedure"
+ properties:
+ startup:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+                              Default option value is not to wait.
+ readiness:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
annotation:
type: object
description: "defines which metadata.annotations items will include or exclude during render StatefulSet, Pod, PVC resources"
diff --git a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-03-chk.yaml b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-03-chk.yaml
index b3f7b4f4e..5323073bf 100644
--- a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-03-chk.yaml
+++ b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-01-section-crd-03-chk.yaml
@@ -676,7 +676,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
diff --git a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-02-section-rbac-02-role.yaml b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-02-section-rbac-02-role.yaml
index c0b1d5418..8e8831222 100644
--- a/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-02-section-rbac-02-role.yaml
+++ b/deploy/builder/templates-install-bundle/clickhouse-operator-install-yaml-template-02-section-rbac-02-role.yaml
@@ -142,6 +142,19 @@ rules:
- create
- delete
+ #
+ # discovery.* resources
+ #
+
+ - apiGroups:
+ - discovery.k8s.io
+ resources:
+ - endpointslices
+ verbs:
+ - get
+ - list
+ - watch
+
#
# apiextensions
#
diff --git a/deploy/builder/templates-operatorhub/clickhouse-operator.vVERSION.clusterserviceversion-template.yaml b/deploy/builder/templates-operatorhub/clickhouse-operator.vVERSION.clusterserviceversion-template.yaml
index 6a20a1b64..14fd150b8 100644
--- a/deploy/builder/templates-operatorhub/clickhouse-operator.vVERSION.clusterserviceversion-template.yaml
+++ b/deploy/builder/templates-operatorhub/clickhouse-operator.vVERSION.clusterserviceversion-template.yaml
@@ -1216,11 +1216,11 @@ spec:
installModes:
- supported: true
type: OwnNamespace
- - supported: false
+ - supported: true
type: SingleNamespace
- - supported: false
+ - supported: true
type: MultiNamespace
- - supported: false
+ - supported: true
type: AllNamespaces
install:
diff --git a/deploy/helm/clickhouse-operator/Chart.yaml b/deploy/helm/clickhouse-operator/Chart.yaml
index 851dcb2b5..1ead0e4c0 100644
--- a/deploy/helm/clickhouse-operator/Chart.yaml
+++ b/deploy/helm/clickhouse-operator/Chart.yaml
@@ -13,8 +13,8 @@ description: |-
kubectl apply -f https://github.com/Altinity/clickhouse-operator/raw/master/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml
```
type: application
-version: 0.25.3
-appVersion: 0.25.3
+version: 0.25.4
+appVersion: 0.25.4
home: https://github.com/Altinity/clickhouse-operator
icon: https://logosandtypes.com/wp-content/uploads/2020/12/altinity.svg
maintainers:
diff --git a/deploy/helm/clickhouse-operator/README.md b/deploy/helm/clickhouse-operator/README.md
index ac449f968..ad8c2611f 100644
--- a/deploy/helm/clickhouse-operator/README.md
+++ b/deploy/helm/clickhouse-operator/README.md
@@ -1,6 +1,6 @@
# altinity-clickhouse-operator
-  
+  
Helm chart to deploy [altinity-clickhouse-operator](https://github.com/Altinity/clickhouse-operator).
diff --git a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml
index 24f7f4a31..a5b5ff5a7 100644
--- a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml
+++ b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml
@@ -4,14 +4,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -358,9 +358,9 @@ spec:
description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
# nullable: true
x-kubernetes-preserve-unknown-fields: true
- reconciling:
+ reconciling: &TypeReconcile
type: object
- description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
# nullable: true
properties:
policy:
@@ -433,20 +433,6 @@ spec:
service:
!!merge <<: *TypeObjectsCleanup
description: "Behavior policy for failed Service, `Retain` by default"
- runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
macros:
type: object
description: "macros parameters"
@@ -490,6 +476,72 @@ spec:
enabled:
!!merge <<: *TypeStringBool
description: "enabled or not"
+ runtime: &TypeReconcileRuntime
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host: &TypeReconcileHost
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectively before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ !!merge <<: *TypeStringBool
+ queries:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait during host launch procedure"
+ properties:
+ startup:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to not wait.
+ readiness:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ reconcile:
+ !!merge <<: *TypeReconcile
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
defaults:
type: object
description: |
@@ -620,6 +672,9 @@ spec:
identity:
type: string
description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ !!merge <<: *TypeStringBool
+ description: "Enables compression in Keeper protocol if set to true"
users:
type: object
description: |
@@ -818,19 +873,9 @@ spec:
description: "allow tuning reconciling process"
properties:
runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ !!merge <<: *TypeReconcileRuntime
+ host:
+ !!merge <<: *TypeReconcileHost
layout:
type: object
description: |
@@ -1117,7 +1162,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1371,7 +1416,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml
index 51ae50227..6af0653e5 100644
--- a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml
+++ b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml
@@ -4,14 +4,14 @@
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -358,9 +358,9 @@ spec:
description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
# nullable: true
x-kubernetes-preserve-unknown-fields: true
- reconciling:
+ reconciling: &TypeReconcile
type: object
- description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
# nullable: true
properties:
policy:
@@ -433,20 +433,6 @@ spec:
service:
!!merge <<: *TypeObjectsCleanup
description: "Behavior policy for failed Service, `Retain` by default"
- runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
macros:
type: object
description: "macros parameters"
@@ -490,6 +476,72 @@ spec:
enabled:
!!merge <<: *TypeStringBool
description: "enabled or not"
+ runtime: &TypeReconcileRuntime
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host: &TypeReconcileHost
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectively before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ !!merge <<: *TypeStringBool
+ queries:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait during host launch procedure"
+ properties:
+ startup:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to not wait.
+ readiness:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ reconcile:
+ !!merge <<: *TypeReconcile
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
defaults:
type: object
description: |
@@ -620,6 +672,9 @@ spec:
identity:
type: string
description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ !!merge <<: *TypeStringBool
+ description: "Enables compression in Keeper protocol if set to true"
users:
type: object
description: |
@@ -818,19 +873,9 @@ spec:
description: "allow tuning reconciling process"
properties:
runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ !!merge <<: *TypeReconcileRuntime
+ host:
+ !!merge <<: *TypeReconcileHost
layout:
type: object
description: |
@@ -1117,7 +1162,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1371,7 +1416,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml
index ae70976fd..661d48e97 100644
--- a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml
+++ b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousekeeperinstallations.clickhouse-keeper.altinity.com.yaml
@@ -1,13 +1,13 @@
# Template Parameters:
#
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.25.3
+ clickhouse-keeper.altinity.com/chop: 0.25.4
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
@@ -675,7 +675,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
diff --git a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml
index 1aebf1cc2..6367355de 100644
--- a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml
+++ b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml
@@ -7,7 +7,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -53,10 +53,9 @@ spec:
description: "Parameters for watch kubernetes resources which used by clickhouse-operator deployment"
properties:
namespaces:
- type: array
+ type: object
description: "List of namespaces where clickhouse-operator watches for events."
- items:
- type: string
+ x-kubernetes-preserve-unknown-fields: true
clickhouse:
type: object
description: "Clickhouse related parameters used by clickhouse-operator"
@@ -378,6 +377,22 @@ spec:
delay:
type: integer
description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait during host launch procedure"
+ properties:
+ startup:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to not wait.
+ readiness:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
annotation:
type: object
description: "defines which metadata.annotations items will include or exclude during render StatefulSet, Pod, PVC resources"
diff --git a/deploy/helm/clickhouse-operator/templates/generated/ClusterRole-clickhouse-operator-kube-system.yaml b/deploy/helm/clickhouse-operator/templates/generated/ClusterRole-clickhouse-operator-kube-system.yaml
index 387351a5f..3a87ea7af 100644
--- a/deploy/helm/clickhouse-operator/templates/generated/ClusterRole-clickhouse-operator-kube-system.yaml
+++ b/deploy/helm/clickhouse-operator/templates/generated/ClusterRole-clickhouse-operator-kube-system.yaml
@@ -126,6 +126,17 @@ rules:
- create
- delete
#
+ # discovery.* resources
+ #
+ - apiGroups:
+ - discovery.k8s.io
+ resources:
+ - endpointslices
+ verbs:
+ - get
+ - list
+ - watch
+ #
# apiextensions
#
- apiGroups:
diff --git a/deploy/helm/clickhouse-operator/templates/generated/Deployment-clickhouse-operator.yaml b/deploy/helm/clickhouse-operator/templates/generated/Deployment-clickhouse-operator.yaml
index eb641d266..623212d50 100644
--- a/deploy/helm/clickhouse-operator/templates/generated/Deployment-clickhouse-operator.yaml
+++ b/deploy/helm/clickhouse-operator/templates/generated/Deployment-clickhouse-operator.yaml
@@ -2,9 +2,9 @@
#
# NAMESPACE=kube-system
# COMMENT=
-# OPERATOR_IMAGE=altinity/clickhouse-operator:0.25.3
+# OPERATOR_IMAGE=altinity/clickhouse-operator:0.25.4
# OPERATOR_IMAGE_PULL_POLICY=Always
-# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.25.3
+# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.25.4
# METRICS_EXPORTER_IMAGE_PULL_POLICY=Always
#
# Setup Deployment for clickhouse-operator
@@ -222,7 +222,7 @@ spec:
securityContext: {{ toYaml .Values.metrics.containerSecurityContext | nindent 12 }}
{{ end }}
imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }}
- {{- if .Values.operator.priorityClassName }}priorityClassName: {{ .Values.operator.priorityClassName | quote }}{{- end }}
+ {{ if .Values.operator.priorityClassName }}priorityClassName: {{ .Values.operator.priorityClassName | quote }}{{ end }}
nodeSelector: {{ toYaml .Values.nodeSelector | nindent 8 }}
affinity: {{ toYaml .Values.affinity | nindent 8 }}
tolerations: {{ toYaml .Values.tolerations | nindent 8 }}
diff --git a/deploy/helm/clickhouse-operator/templates/generated/Role-clickhouse-operator.yaml b/deploy/helm/clickhouse-operator/templates/generated/Role-clickhouse-operator.yaml
index 2fb3015e5..eaa8ff45b 100644
--- a/deploy/helm/clickhouse-operator/templates/generated/Role-clickhouse-operator.yaml
+++ b/deploy/helm/clickhouse-operator/templates/generated/Role-clickhouse-operator.yaml
@@ -126,6 +126,17 @@ rules:
- create
- delete
#
+ # discovery.* resources
+ #
+ - apiGroups:
+ - discovery.k8s.io
+ resources:
+ - endpointslices
+ verbs:
+ - get
+ - list
+ - watch
+ #
# apiextensions
#
- apiGroups:
diff --git a/deploy/helm/clickhouse-operator/templates/generated/Secret-clickhouse-operator.yaml b/deploy/helm/clickhouse-operator/templates/generated/Secret-clickhouse-operator.yaml
index 56fb918ae..cc9c70fda 100644
--- a/deploy/helm/clickhouse-operator/templates/generated/Secret-clickhouse-operator.yaml
+++ b/deploy/helm/clickhouse-operator/templates/generated/Secret-clickhouse-operator.yaml
@@ -3,7 +3,7 @@
# Template parameters available:
# NAMESPACE=kube-system
# COMMENT=
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
# CH_PASSWORD_SECRET_PLAIN=clickhouse_operator_password
#
diff --git a/deploy/helm/clickhouse-operator/values.schema.json b/deploy/helm/clickhouse-operator/values.schema.json
new file mode 100644
index 000000000..c299c1e2d
--- /dev/null
+++ b/deploy/helm/clickhouse-operator/values.schema.json
@@ -0,0 +1,895 @@
+{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "type": "object",
+ "properties": {
+ "additionalResources": {
+ "description": "list of additional resources to create (processed via `tpl` function), useful for create ClickHouse clusters together with clickhouse-operator. check `kubectl explain chi` for details",
+ "type": "array"
+ },
+ "affinity": {
+ "description": "affinity for scheduler pod assignment, check `kubectl explain pod.spec.affinity` for details",
+ "type": "object"
+ },
+ "commonAnnotations": {
+ "description": "set of annotations that will be applied to all the resources for the operator",
+ "type": "object"
+ },
+ "commonLabels": {
+ "description": "set of labels that will be applied to all the resources for the operator",
+ "type": "object"
+ },
+ "configs": {
+ "description": "clickhouse operator configs",
+ "type": "object",
+ "properties": {
+ "confdFiles": {
+ "type": ["string", "null"]
+ },
+ "configdFiles": {
+ "type": "object",
+ "properties": {
+ "01-clickhouse-01-listen.xml": {
+ "type": "string"
+ },
+ "01-clickhouse-02-logger.xml": {
+ "type": "string"
+ },
+ "01-clickhouse-03-query_log.xml": {
+ "type": "string"
+ },
+ "01-clickhouse-04-part_log.xml": {
+ "type": "string"
+ },
+ "01-clickhouse-05-trace_log.xml": {
+ "type": "string"
+ }
+ }
+ },
+ "files": {
+ "type": "object",
+ "properties": {
+ "config.yaml": {
+ "type": "object",
+ "properties": {
+ "annotation": {
+ "type": "object",
+ "properties": {
+ "exclude": {
+ "type": "array"
+ },
+ "include": {
+ "type": "array"
+ }
+ }
+ },
+ "clickhouse": {
+ "type": "object",
+ "properties": {
+ "access": {
+ "type": "object",
+ "properties": {
+ "password": {
+ "type": "string"
+ },
+ "port": {
+ "type": "integer"
+ },
+ "rootCA": {
+ "type": "string"
+ },
+ "scheme": {
+ "type": "string"
+ },
+ "secret": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "namespace": {
+ "type": "string"
+ }
+ }
+ },
+ "timeouts": {
+ "type": "object",
+ "properties": {
+ "connect": {
+ "type": "integer"
+ },
+ "query": {
+ "type": "integer"
+ }
+ }
+ },
+ "username": {
+ "type": "string"
+ }
+ }
+ },
+ "addons": {
+ "type": "object",
+ "properties": {
+ "rules": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "spec": {
+ "type": "object",
+ "properties": {
+ "configuration": {
+ "type": "object",
+ "properties": {
+ "files": {
+ "type": "null"
+ },
+ "profiles": {
+ "type": ["object", "null"],
+ "properties": {
+ "clickhouse_operator/format_display_secrets_in_show_and_select": {
+ "type": "integer"
+ }
+ }
+ },
+ "quotas": {
+ "type": ["object", "null"]
+ },
+ "settings": {
+ "type": ["object", "null"],
+ "properties": {
+ "display_secrets_in_show_and_select": {
+ "type": "integer"
+ }
+ }
+ },
+ "users": {
+ "type": ["object", "null"],
+ "properties": {
+ "{clickhouseOperatorUser}/access_management": {
+ "type": "integer"
+ },
+ "{clickhouseOperatorUser}/named_collection_control": {
+ "type": "integer"
+ },
+ "{clickhouseOperatorUser}/show_named_collections": {
+ "type": "integer"
+ },
+ "{clickhouseOperatorUser}/show_named_collections_secrets": {
+ "type": "integer"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "version": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
+ "configuration": {
+ "type": "object",
+ "properties": {
+ "file": {
+ "type": "object",
+ "properties": {
+ "path": {
+ "type": "object",
+ "properties": {
+ "common": {
+ "type": "string"
+ },
+ "host": {
+ "type": "string"
+ },
+ "user": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "network": {
+ "type": "object",
+ "properties": {
+ "hostRegexpTemplate": {
+ "type": "string"
+ }
+ }
+ },
+ "user": {
+ "type": "object",
+ "properties": {
+ "default": {
+ "type": "object",
+ "properties": {
+ "networksIP": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "password": {
+ "type": "string"
+ },
+ "profile": {
+ "type": "string"
+ },
+ "quota": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "configurationRestartPolicy": {
+ "type": "object",
+ "properties": {
+ "rules": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "rules": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "files/*.xml": {
+ "type": "string"
+ },
+ "files/config.d/*.xml": {
+ "type": "string"
+ },
+ "files/config.d/*dict*.xml": {
+ "type": "string"
+ },
+ "files/config.d/*no_restart*": {
+ "type": "string"
+ },
+ "profiles/default/background_*_pool_size": {
+ "type": "string"
+ },
+ "profiles/default/max_*_for_server": {
+ "type": "string"
+ },
+ "settings/*": {
+ "type": "string"
+ },
+ "settings/access_control_path": {
+ "type": "string"
+ },
+ "settings/dictionaries_config": {
+ "type": "string"
+ },
+ "settings/display_secrets_in_show_and_select": {
+ "type": "string"
+ },
+ "settings/logger": {
+ "type": "string"
+ },
+ "settings/logger/*": {
+ "type": "string"
+ },
+ "settings/macros/*": {
+ "type": "string"
+ },
+ "settings/max_*_to_drop": {
+ "type": "string"
+ },
+ "settings/max_concurrent_queries": {
+ "type": "string"
+ },
+ "settings/max_server_memory_*": {
+ "type": "string"
+ },
+ "settings/models_config": {
+ "type": "string"
+ },
+ "settings/remote_servers/*": {
+ "type": "string"
+ },
+ "settings/user_defined_executable_functions_config": {
+ "type": "string"
+ },
+ "settings/user_directories/*": {
+ "type": "string"
+ },
+ "zookeeper/*": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "version": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
+ "metrics": {
+ "type": "object",
+ "properties": {
+ "timeouts": {
+ "type": "object",
+ "properties": {
+ "collect": {
+ "type": "integer"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "keeper": {
+ "type": "object",
+ "properties": {
+ "configuration": {
+ "type": "object",
+ "properties": {
+ "file": {
+ "type": "object",
+ "properties": {
+ "path": {
+ "type": "object",
+ "properties": {
+ "common": {
+ "type": "string"
+ },
+ "host": {
+ "type": "string"
+ },
+ "user": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "label": {
+ "type": "object",
+ "properties": {
+ "appendScope": {
+ "type": "string"
+ },
+ "exclude": {
+ "type": "array"
+ },
+ "include": {
+ "type": "array"
+ }
+ }
+ },
+ "logger": {
+ "type": "object",
+ "properties": {
+ "alsologtostderr": {
+ "type": "string"
+ },
+ "log_backtrace_at": {
+ "type": "string"
+ },
+ "logtostderr": {
+ "type": "string"
+ },
+ "stderrthreshold": {
+ "type": "string"
+ },
+ "v": {
+ "type": "string"
+ },
+ "vmodule": {
+ "type": "string"
+ }
+ }
+ },
+ "metrics": {
+ "type": "object",
+ "properties": {
+ "labels": {
+ "type": "object",
+ "properties": {
+ "exclude": {
+ "type": "array"
+ }
+ }
+ }
+ }
+ },
+ "pod": {
+ "type": "object",
+ "properties": {
+ "terminationGracePeriod": {
+ "type": "integer"
+ }
+ }
+ },
+ "reconcile": {
+ "type": "object",
+ "properties": {
+ "host": {
+ "type": "object",
+ "properties": {
+ "wait": {
+ "type": "object",
+ "properties": {
+ "exclude": {
+ "type": "boolean"
+ },
+ "include": {
+ "type": "boolean"
+ },
+ "probes": {
+ "type": "object",
+ "properties": {
+ "readiness": {
+ "type": "boolean"
+ },
+ "startup": {
+ "type": "boolean"
+ }
+ }
+ },
+ "queries": {
+ "type": "boolean"
+ },
+ "replicas": {
+ "type": "object",
+ "properties": {
+ "all": {
+ "type": "boolean"
+ },
+ "delay": {
+ "type": "integer"
+ },
+ "new": {
+ "type": "boolean"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "runtime": {
+ "type": "object",
+ "properties": {
+ "reconcileCHIsThreadsNumber": {
+ "type": "integer"
+ },
+ "reconcileShardsMaxConcurrencyPercent": {
+ "type": "integer"
+ },
+ "reconcileShardsThreadsNumber": {
+ "type": "integer"
+ }
+ }
+ },
+ "statefulSet": {
+ "type": "object",
+ "properties": {
+ "create": {
+ "type": "object",
+ "properties": {
+ "onFailure": {
+ "type": "string"
+ }
+ }
+ },
+ "update": {
+ "type": "object",
+ "properties": {
+ "onFailure": {
+ "type": "string"
+ },
+ "pollInterval": {
+ "type": "integer"
+ },
+ "timeout": {
+ "type": "integer"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "statefulSet": {
+ "type": "object",
+ "properties": {
+ "revisionHistoryLimit": {
+ "type": "integer"
+ }
+ }
+ },
+ "status": {
+ "type": "object",
+ "properties": {
+ "fields": {
+ "type": "object",
+ "properties": {
+ "action": {
+ "type": "boolean"
+ },
+ "actions": {
+ "type": "boolean"
+ },
+ "error": {
+ "type": "boolean"
+ },
+ "errors": {
+ "type": "boolean"
+ }
+ }
+ }
+ }
+ },
+ "template": {
+ "type": "object",
+ "properties": {
+ "chi": {
+ "type": "object",
+ "properties": {
+ "path": {
+ "type": "string"
+ },
+ "policy": {
+ "type": "string"
+ }
+ }
+ },
+ "chk": {
+ "type": "object",
+ "properties": {
+ "path": {
+ "type": "string"
+ },
+ "policy": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "watch": {
+ "type": "object",
+ "properties": {
+ "namespaces": {
+ "type": "array"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "keeperConfdFiles": {
+ "type": ["string", "null"]
+ },
+ "keeperConfigdFiles": {
+ "type": "object",
+ "properties": {
+ "01-keeper-01-default-config.xml": {
+ "type": "string"
+ },
+ "01-keeper-02-readiness.xml": {
+ "type": "string"
+ },
+ "01-keeper-03-enable-reconfig.xml": {
+ "type": "string"
+ }
+ }
+ },
+ "keeperTemplatesdFiles": {
+ "type": ["object", "null"],
+ "properties": {
+ "readme": {
+ "type": ["string", "null"]
+ }
+ }
+ },
+ "keeperUsersdFiles": {
+ "type": ["string", "null"]
+ },
+ "templatesdFiles": {
+ "type": ["object", "null"],
+ "properties": {
+ "001-templates.json.example": {
+ "type": ["string", "null"]
+ },
+ "default-pod-template.yaml.example": {
+ "type": ["string", "null"]
+ },
+ "default-storage-template.yaml.example": {
+ "type": ["string", "null"]
+ },
+ "readme": {
+ "type": ["string", "null"]
+ }
+ }
+ },
+ "usersdFiles": {
+ "type": ["object", "null"],
+ "properties": {
+ "01-clickhouse-operator-profile.xml": {
+ "type": ["string", "null"]
+ },
+ "02-clickhouse-default-profile.xml": {
+ "type": ["string", "null"]
+ }
+ }
+ }
+ }
+ },
+ "dashboards": {
+ "type": "object",
+ "properties": {
+ "additionalLabels": {
+ "description": "labels to add to a secret with dashboards",
+ "type": "object",
+ "properties": {
+ "grafana_dashboard": {
+ "type": "string"
+ }
+ }
+ },
+ "annotations": {
+ "description": "annotations to add to a secret with dashboards",
+ "type": "object"
+ },
+ "enabled": {
+ "description": "provision grafana dashboards as configMaps (can be synced by grafana dashboards sidecar https://github.com/grafana/helm-charts/blob/grafana-8.3.4/charts/grafana/values.yaml#L778 )",
+ "type": "boolean"
+ },
+ "grafana_folder": {
+ "type": "string"
+ }
+ }
+ },
+ "deployment": {
+ "type": "object",
+ "properties": {
+ "strategy": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "fullnameOverride": {
+ "description": "full name of the chart.",
+ "type": "string"
+ },
+ "imagePullSecrets": {
+ "description": "image pull secret for private images in clickhouse-operator pod possible value format `[{\"name\":\"your-secret-name\"}]`, check `kubectl explain pod.spec.imagePullSecrets` for details",
+ "type": "array"
+ },
+ "metrics": {
+ "type": "object",
+ "properties": {
+ "containerSecurityContext": {
+ "type": "object"
+ },
+ "enabled": {
+ "type": "boolean"
+ },
+ "env": {
+ "description": "additional environment variables for the deployment of metrics-exporter containers possible format value `[{\"name\": \"SAMPLE\", \"value\": \"text\"}]`",
+ "type": "array"
+ },
+ "image": {
+ "type": "object",
+ "properties": {
+ "pullPolicy": {
+ "description": "image pull policy",
+ "type": "string"
+ },
+ "repository": {
+ "description": "image repository",
+ "type": "string"
+ },
+ "tag": {
+ "description": "image tag (chart's appVersion value will be used if not set)",
+ "type": "string"
+ }
+ }
+ },
+ "resources": {
+ "description": "custom resource configuration",
+ "type": "object"
+ }
+ }
+ },
+ "nameOverride": {
+ "description": "override name of the chart",
+ "type": "string"
+ },
+ "namespaceOverride": {
+ "type": "string"
+ },
+ "nodeSelector": {
+ "description": "node for scheduler pod assignment, check `kubectl explain pod.spec.nodeSelector` for details",
+ "type": "object"
+ },
+ "operator": {
+ "type": "object",
+ "properties": {
+ "containerSecurityContext": {
+ "type": "object"
+ },
+ "env": {
+ "description": "additional environment variables for the clickhouse-operator container in deployment possible format value `[{\"name\": \"SAMPLE\", \"value\": \"text\"}]`",
+ "type": "array"
+ },
+ "image": {
+ "type": "object",
+ "properties": {
+ "pullPolicy": {
+ "description": "image pull policy",
+ "type": "string"
+ },
+ "repository": {
+ "description": "image repository",
+ "type": "string"
+ },
+ "tag": {
+ "description": "image tag (chart's appVersion value will be used if not set)",
+ "type": "string"
+ }
+ }
+ },
+ "priorityClassName": {
+ "description": "priority class name for the clickhouse-operator deployment, check `kubectl explain pod.spec.priorityClassName` for details",
+ "type": "string"
+ },
+ "resources": {
+ "description": "custom resource configuration, check `kubectl explain pod.spec.containers.resources` for details",
+ "type": "object"
+ }
+ }
+ },
+ "podAnnotations": {
+ "description": "annotations to add to the clickhouse-operator pod, check `kubectl explain pod.spec.annotations` for details",
+ "type": "object",
+ "properties": {
+ "clickhouse-operator-metrics/port": {
+ "type": "string"
+ },
+ "clickhouse-operator-metrics/scrape": {
+ "type": "string"
+ },
+ "prometheus.io/port": {
+ "type": "string"
+ },
+ "prometheus.io/scrape": {
+ "type": "string"
+ }
+ }
+ },
+ "podLabels": {
+ "description": "labels to add to the clickhouse-operator pod",
+ "type": "object"
+ },
+ "podSecurityContext": {
+ "type": "object"
+ },
+ "rbac": {
+ "type": "object",
+ "properties": {
+ "create": {
+ "description": "specifies whether rbac resources should be created",
+ "type": "boolean"
+ },
+ "namespaceScoped": {
+ "description": "specifies whether to create roles and rolebindings at the cluster level or namespace level",
+ "type": "boolean"
+ }
+ }
+ },
+ "secret": {
+ "type": "object",
+ "properties": {
+ "create": {
+ "description": "create a secret with operator credentials",
+ "type": "boolean"
+ },
+ "password": {
+ "description": "operator credentials password",
+ "type": "string"
+ },
+ "username": {
+ "description": "operator credentials username",
+ "type": "string"
+ }
+ }
+ },
+ "serviceAccount": {
+ "type": "object",
+ "properties": {
+ "annotations": {
+ "description": "annotations to add to the service account",
+ "type": "object"
+ },
+ "create": {
+ "description": "specifies whether a service account should be created",
+ "type": "boolean"
+ },
+ "name": {
+ "description": "the name of the service account to use; if not set and create is true, a name is generated using the fullname template",
+ "type": "null"
+ }
+ }
+ },
+ "serviceMonitor": {
+ "type": "object",
+ "properties": {
+ "additionalLabels": {
+ "description": "additional labels for service monitor",
+ "type": "object"
+ },
+ "clickhouseMetrics": {
+ "type": "object",
+ "properties": {
+ "interval": {
+ "type": "string"
+ },
+ "metricRelabelings": {
+ "type": "array"
+ },
+ "relabelings": {
+ "type": "array"
+ },
+ "scrapeTimeout": {
+ "type": "string"
+ }
+ }
+ },
+ "enabled": {
+ "description": "ServiceMonitor Custom resource is created for a [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) In serviceMonitor will be created two endpoints ch-metrics on port 8888 and op-metrics # 9999. Ypu can specify interval, scrapeTimeout, relabelings, metricRelabelings for each endpoint below",
+ "type": "boolean"
+ },
+ "operatorMetrics": {
+ "type": "object",
+ "properties": {
+ "interval": {
+ "type": "string"
+ },
+ "metricRelabelings": {
+ "type": "array"
+ },
+ "relabelings": {
+ "type": "array"
+ },
+ "scrapeTimeout": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "tolerations": {
+ "description": "tolerations for scheduler pod assignment, check `kubectl explain pod.spec.tolerations` for details",
+ "type": "array"
+ },
+ "topologySpreadConstraints": {
+ "type": "array"
+ }
+ }
+}
diff --git a/deploy/helm/clickhouse-operator/values.yaml b/deploy/helm/clickhouse-operator/values.yaml
index 6d08ae3e3..d4d58774e 100644
--- a/deploy/helm/clickhouse-operator/values.yaml
+++ b/deploy/helm/clickhouse-operator/values.yaml
@@ -233,7 +233,6 @@ configs:
# Concurrently running operators should watch on different namespaces.
# IMPORTANT
# Regexp is applicable.
- #namespaces: ["dev", "test"]
namespaces: []
clickhouse:
configuration:
@@ -541,15 +540,31 @@ configs:
# - to be excluded from a ClickHouse cluster
# - to complete all running queries
# - to be included into a ClickHouse cluster
- # respectfully before moving forward
+        # respectively before moving forward with host reconcile
wait:
exclude: true
queries: true
include: false
+ # The operator during reconcile procedure should wait for replicas to catch-up
+ # replication delay a.k.a replication lag for the following replicas
replicas:
+ # All replicas (new and known earlier) are explicitly requested to wait for replication to catch-up
all: no
+ # New replicas only are requested to wait for replication to catch-up
new: yes
+ # Replication catch-up is considered to be completed as soon as replication delay
+ # a.k.a replication lag - calculated as "MAX(absolute_delay) FROM system.replicas"
+ # is within this specified delay (in seconds)
delay: 10
+ probes:
+ # Whether the operator during host launch procedure should wait for startup probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+            # Default option value is not to wait.
+ startup: no
+ # Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+ # Default option value is to wait.
+ readiness: yes
################################################
##
## Annotations management section
diff --git a/deploy/operator/clickhouse-operator-install-ansible.yaml b/deploy/operator/clickhouse-operator-install-ansible.yaml
index 342a2b25e..482074ed0 100644
--- a/deploy/operator/clickhouse-operator-install-ansible.yaml
+++ b/deploy/operator/clickhouse-operator-install-ansible.yaml
@@ -11,14 +11,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -365,9 +365,9 @@ spec:
description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
# nullable: true
x-kubernetes-preserve-unknown-fields: true
- reconciling:
+ reconciling: &TypeReconcile
type: object
- description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
# nullable: true
properties:
policy:
@@ -440,20 +440,6 @@ spec:
service:
<<: *TypeObjectsCleanup
description: "Behavior policy for failed Service, `Retain` by default"
- runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
macros:
type: object
description: "macros parameters"
@@ -497,6 +483,72 @@ spec:
enabled:
<<: *TypeStringBool
description: "enabled or not"
+ runtime: &TypeReconcileRuntime
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host: &TypeReconcileHost
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectfully before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ <<: *TypeStringBool
+ queries:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait during host launch procedure"
+ properties:
+ startup:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to do not wait.
+ readiness:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for ready probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ reconcile:
+ <<: *TypeReconcile
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
defaults:
type: object
description: |
@@ -627,6 +679,9 @@ spec:
identity:
type: string
description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ !!merge <<: *TypeStringBool
+ description: "Enables compression in Keeper protocol if set to true"
users:
type: object
description: |
@@ -828,19 +883,9 @@ spec:
description: "allow tuning reconciling process"
properties:
runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ <<: *TypeReconcileRuntime
+ host:
+ <<: *TypeReconcileHost
layout:
type: object
description: |
@@ -1127,7 +1172,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1383,7 +1428,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
@@ -1398,14 +1443,14 @@ spec:
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -1752,9 +1797,9 @@ spec:
description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
# nullable: true
x-kubernetes-preserve-unknown-fields: true
- reconciling:
+ reconciling: &TypeReconcile
type: object
- description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
# nullable: true
properties:
policy:
@@ -1827,20 +1872,6 @@ spec:
service:
<<: *TypeObjectsCleanup
description: "Behavior policy for failed Service, `Retain` by default"
- runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
macros:
type: object
description: "macros parameters"
@@ -1884,6 +1915,72 @@ spec:
enabled:
<<: *TypeStringBool
description: "enabled or not"
+ runtime: &TypeReconcileRuntime
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host: &TypeReconcileHost
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectfully before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ <<: *TypeStringBool
+ queries:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait during host launch procedure"
+ properties:
+ startup:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to do not wait.
+ readiness:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for ready probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ reconcile:
+ <<: *TypeReconcile
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
defaults:
type: object
description: |
@@ -2014,6 +2111,9 @@ spec:
identity:
type: string
description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ !!merge <<: *TypeStringBool
+ description: "Enables compression in Keeper protocol if set to true"
users:
type: object
description: |
@@ -2215,19 +2315,9 @@ spec:
description: "allow tuning reconciling process"
properties:
runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ <<: *TypeReconcileRuntime
+ host:
+ <<: *TypeReconcileHost
layout:
type: object
description: |
@@ -2514,7 +2604,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -2770,7 +2860,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
@@ -2788,7 +2878,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -2834,10 +2924,9 @@ spec:
description: "Parameters for watch kubernetes resources which used by clickhouse-operator deployment"
properties:
namespaces:
- type: array
+ type: object
description: "List of namespaces where clickhouse-operator watches for events."
- items:
- type: string
+ x-kubernetes-preserve-unknown-fields: true
clickhouse:
type: object
description: "Clickhouse related parameters used by clickhouse-operator"
@@ -3108,7 +3197,6 @@ spec:
- to complete all running queries
- to be included into a ClickHouse cluster
respectfully before moving forward
-
properties:
wait:
type: object
@@ -3160,6 +3248,22 @@ spec:
delay:
type: integer
description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait during host launch procedure"
+ properties:
+ startup:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to do not wait.
+ readiness:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
annotation:
type: object
description: "defines which metadata.annotations items will include or exclude during render StatefulSet, Pod, PVC resources"
@@ -3292,14 +3396,14 @@ spec:
---
# Template Parameters:
#
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.25.3
+ clickhouse-keeper.altinity.com/chop: 0.25.4
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
@@ -3968,7 +4072,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -4190,7 +4294,7 @@ metadata:
name: clickhouse-operator
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
---
# Template Parameters:
#
@@ -4216,7 +4320,7 @@ metadata:
name: clickhouse-operator
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
rules:
#
@@ -4336,6 +4440,19 @@ rules:
- create
- delete
+ #
+ # discovery.* resources
+ #
+
+ - apiGroups:
+ - discovery.k8s.io
+ resources:
+ - endpointslices
+ verbs:
+ - get
+ - list
+ - watch
+
#
# apiextensions
#
@@ -4435,7 +4552,7 @@ metadata:
name: clickhouse-operator
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -4457,7 +4574,7 @@ metadata:
name: etc-clickhouse-operator-files
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
config.yaml: |
@@ -4486,7 +4603,6 @@ data:
# Concurrently running operators should watch on different namespaces.
# IMPORTANT
# Regexp is applicable.
- #namespaces: ["dev", "test"]
namespaces: [{{ namespace }}]
clickhouse:
@@ -4819,15 +4935,31 @@ data:
# - to be excluded from a ClickHouse cluster
# - to complete all running queries
# - to be included into a ClickHouse cluster
- # respectfully before moving forward
+        # respectively before moving forward with host reconcile
wait:
exclude: true
queries: true
include: false
+ # The operator during reconcile procedure should wait for replicas to catch-up
+ # replication delay a.k.a replication lag for the following replicas
replicas:
+ # All replicas (new and known earlier) are explicitly requested to wait for replication to catch-up
all: no
+ # New replicas only are requested to wait for replication to catch-up
new: yes
+ # Replication catch-up is considered to be completed as soon as replication delay
+ # a.k.a replication lag - calculated as "MAX(absolute_delay) FROM system.replicas"
+ # is within this specified delay (in seconds)
delay: 10
+ probes:
+ # Whether the operator during host launch procedure should wait for startup probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+            # Default option value is not to wait.
+ startup: no
+ # Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+ # Default option value is to wait.
+ readiness: yes
################################################
##
@@ -4940,7 +5072,7 @@ metadata:
name: etc-clickhouse-operator-confd-files
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
---
@@ -4956,7 +5088,7 @@ metadata:
name: etc-clickhouse-operator-configd-files
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
01-clickhouse-01-listen.xml: |
@@ -5055,7 +5187,7 @@ metadata:
name: etc-clickhouse-operator-templatesd-files
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
001-templates.json.example: |
@@ -5155,7 +5287,7 @@ metadata:
name: etc-clickhouse-operator-usersd-files
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
01-clickhouse-operator-profile.xml: |
@@ -5218,7 +5350,7 @@ metadata:
name: etc-keeper-operator-confd-files
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
---
@@ -5234,7 +5366,7 @@ metadata:
name: etc-keeper-operator-configd-files
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
01-keeper-01-default-config.xml: |
@@ -5312,7 +5444,7 @@ metadata:
name: etc-keeper-operator-templatesd-files
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
readme: |
@@ -5330,7 +5462,7 @@ metadata:
name: etc-keeper-operator-usersd-files
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
---
@@ -5338,7 +5470,7 @@ data:
# Template parameters available:
# NAMESPACE={{ namespace }}
# COMMENT=
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
# CH_PASSWORD_SECRET_PLAIN={{ password }}
#
@@ -5348,7 +5480,7 @@ metadata:
name: clickhouse-operator
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
type: Opaque
stringData:
@@ -5359,9 +5491,9 @@ stringData:
#
# NAMESPACE={{ namespace }}
# COMMENT=
-# OPERATOR_IMAGE=altinity/clickhouse-operator:0.25.3
+# OPERATOR_IMAGE=altinity/clickhouse-operator:0.25.4
# OPERATOR_IMAGE_PULL_POLICY=Always
-# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.25.3
+# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.25.4
# METRICS_EXPORTER_IMAGE_PULL_POLICY=Always
#
# Setup Deployment for clickhouse-operator
@@ -5372,7 +5504,7 @@ metadata:
name: clickhouse-operator
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
spec:
replicas: 1
@@ -5420,7 +5552,7 @@ spec:
name: etc-keeper-operator-usersd-files
containers:
- name: clickhouse-operator
- image: altinity/clickhouse-operator:0.25.3
+ image: altinity/clickhouse-operator:0.25.4
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -5496,7 +5628,7 @@ spec:
name: op-metrics
- name: metrics-exporter
- image: altinity/metrics-exporter:0.25.3
+ image: altinity/metrics-exporter:0.25.4
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -5587,7 +5719,7 @@ metadata:
name: clickhouse-operator-metrics
namespace: {{ namespace }}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
spec:
ports:
diff --git a/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml b/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml
index 58ea09de5..c964d7a9b 100644
--- a/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml
+++ b/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml
@@ -4,14 +4,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -353,9 +353,9 @@ spec:
description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
# nullable: true
x-kubernetes-preserve-unknown-fields: true
- reconciling:
+ reconciling: &TypeReconcile
type: object
- description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
# nullable: true
properties:
policy:
@@ -428,20 +428,6 @@ spec:
service:
!!merge <<: *TypeObjectsCleanup
description: "Behavior policy for failed Service, `Retain` by default"
- runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
macros:
type: object
description: "macros parameters"
@@ -485,6 +471,72 @@ spec:
enabled:
!!merge <<: *TypeStringBool
description: "enabled or not"
+ runtime: &TypeReconcileRuntime
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host: &TypeReconcileHost
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectfully before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ !!merge <<: *TypeStringBool
+ queries:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait during host launch procedure"
+ properties:
+ startup:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to do not wait.
+ readiness:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for ready probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ reconcile:
+ !!merge <<: *TypeReconcile
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
defaults:
type: object
description: |
@@ -615,6 +667,9 @@ spec:
identity:
type: string
description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ !!merge <<: *TypeStringBool
+ description: "Enables compression in Keeper protocol if set to true"
users:
type: object
description: |
@@ -813,19 +868,9 @@ spec:
description: "allow tuning reconciling process"
properties:
runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ !!merge <<: *TypeReconcileRuntime
+ host:
+ !!merge <<: *TypeReconcileHost
layout:
type: object
description: |
@@ -1112,7 +1157,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1366,7 +1411,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
@@ -1381,14 +1426,14 @@ spec:
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -1728,9 +1773,9 @@ spec:
description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
# nullable: true
x-kubernetes-preserve-unknown-fields: true
- reconciling:
+ reconciling: &TypeReconcile
type: object
- description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
# nullable: true
properties:
policy:
@@ -1803,20 +1848,6 @@ spec:
service:
!!merge <<: *TypeObjectsCleanup
description: "Behavior policy for failed Service, `Retain` by default"
- runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
macros:
type: object
description: "macros parameters"
@@ -1860,6 +1891,72 @@ spec:
enabled:
!!merge <<: *TypeStringBool
description: "enabled or not"
+ runtime: &TypeReconcileRuntime
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host: &TypeReconcileHost
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectfully before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ !!merge <<: *TypeStringBool
+ queries:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait for during host launch procedure"
+ properties:
+ startup:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is not to wait.
+ readiness:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ reconcile:
+ !!merge <<: *TypeReconcile
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
defaults:
type: object
description: |
@@ -1990,6 +2087,9 @@ spec:
identity:
type: string
description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ !!merge <<: *TypeStringBool
+ description: "Enables compression in Keeper protocol if set to true"
users:
type: object
description: |
@@ -2188,19 +2288,9 @@ spec:
description: "allow tuning reconciling process"
properties:
runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ !!merge <<: *TypeReconcileRuntime
+ host:
+ !!merge <<: *TypeReconcileHost
layout:
type: object
description: |
@@ -2487,7 +2577,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -2741,7 +2831,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
@@ -2759,7 +2849,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -2801,10 +2891,9 @@ spec:
description: "Parameters for watch kubernetes resources which used by clickhouse-operator deployment"
properties:
namespaces:
- type: array
+ type: object
description: "List of namespaces where clickhouse-operator watches for events."
- items:
- type: string
+ x-kubernetes-preserve-unknown-fields: true
clickhouse:
type: object
description: "Clickhouse related parameters used by clickhouse-operator"
@@ -3126,6 +3215,22 @@ spec:
delay:
type: integer
description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait for during host launch procedure"
+ properties:
+ startup:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is not to wait.
+ readiness:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
annotation:
type: object
description: "defines which metadata.annotations items will include or exclude during render StatefulSet, Pod, PVC resources"
@@ -3253,14 +3358,14 @@ spec:
---
# Template Parameters:
#
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.25.3
+ clickhouse-keeper.altinity.com/chop: 0.25.4
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
@@ -3928,7 +4033,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -4148,7 +4253,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
# Template Parameters:
#
@@ -4173,7 +4278,7 @@ metadata:
name: clickhouse-operator-kube-system
#namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
rules:
#
# Core API group
@@ -4287,6 +4392,17 @@ rules:
- create
- delete
#
+ # discovery.* resources
+ #
+ - apiGroups:
+ - discovery.k8s.io
+ resources:
+ - endpointslices
+ verbs:
+ - get
+ - list
+ - watch
+ #
# apiextensions
#
- apiGroups:
@@ -4382,7 +4498,7 @@ metadata:
name: clickhouse-operator-kube-system
#namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -4415,7 +4531,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
rules:
#
# Core API group
@@ -4529,6 +4645,17 @@ rules:
- create
- delete
#
+ # discovery.* resources
+ #
+ - apiGroups:
+ - discovery.k8s.io
+ resources:
+ - endpointslices
+ verbs:
+ - get
+ - list
+ - watch
+ #
# apiextensions
#
- apiGroups:
@@ -4624,7 +4751,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -4646,7 +4773,7 @@ metadata:
name: etc-clickhouse-operator-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
config.yaml: |
@@ -4675,7 +4802,6 @@ data:
# Concurrently running operators should watch on different namespaces.
# IMPORTANT
# Regexp is applicable.
- #namespaces: ["dev", "test"]
namespaces: []
clickhouse:
@@ -5008,15 +5134,31 @@ data:
# - to be excluded from a ClickHouse cluster
# - to complete all running queries
# - to be included into a ClickHouse cluster
- # respectfully before moving forward
+ # respectfully before moving forward with host reconcile
wait:
exclude: true
queries: true
include: false
+ # The operator during reconcile procedure should wait for replicas to catch-up
+ # replication delay a.k.a replication lag for the following replicas
replicas:
+ # All replicas (new and known earlier) are explicitly requested to wait for replication to catch-up
all: no
+ # New replicas only are requested to wait for replication to catch-up
new: yes
+ # Replication catch-up is considered to be completed as soon as replication delay
+ # a.k.a replication lag - calculated as "MAX(absolute_delay) FROM system.replicas"
+ # is within this specified delay (in seconds)
delay: 10
+ probes:
+ # Whether the operator during host launch procedure should wait for startup probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+ # Default option value is not to wait.
+ startup: no
+ # Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+ # Default option value is to wait.
+ readiness: yes
################################################
##
@@ -5128,7 +5270,7 @@ metadata:
name: etc-clickhouse-operator-confd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
---
@@ -5144,7 +5286,7 @@ metadata:
name: etc-clickhouse-operator-configd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
01-clickhouse-01-listen.xml: |
@@ -5238,7 +5380,7 @@ metadata:
name: etc-clickhouse-operator-templatesd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
001-templates.json.example: |
@@ -5336,7 +5478,7 @@ metadata:
name: etc-clickhouse-operator-usersd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
01-clickhouse-operator-profile.xml: |
@@ -5398,7 +5540,7 @@ metadata:
name: etc-keeper-operator-confd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
---
@@ -5414,7 +5556,7 @@ metadata:
name: etc-keeper-operator-configd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
01-keeper-01-default-config.xml: |
@@ -5489,7 +5631,7 @@ metadata:
name: etc-keeper-operator-templatesd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
readme: |
@@ -5507,7 +5649,7 @@ metadata:
name: etc-keeper-operator-usersd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
---
@@ -5515,7 +5657,7 @@ data:
# Template parameters available:
# NAMESPACE=kube-system
# COMMENT=
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
# CH_PASSWORD_SECRET_PLAIN=clickhouse_operator_password
#
@@ -5525,7 +5667,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
type: Opaque
stringData:
@@ -5536,9 +5678,9 @@ stringData:
#
# NAMESPACE=kube-system
# COMMENT=
-# OPERATOR_IMAGE=altinity/clickhouse-operator:0.25.3
+# OPERATOR_IMAGE=altinity/clickhouse-operator:0.25.4
# OPERATOR_IMAGE_PULL_POLICY=Always
-# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.25.3
+# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.25.4
# METRICS_EXPORTER_IMAGE_PULL_POLICY=Always
#
# Setup Deployment for clickhouse-operator
@@ -5549,7 +5691,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
spec:
replicas: 1
@@ -5597,7 +5739,7 @@ spec:
name: etc-keeper-operator-usersd-files
containers:
- name: clickhouse-operator
- image: altinity/clickhouse-operator:0.25.3
+ image: altinity/clickhouse-operator:0.25.4
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -5671,7 +5813,7 @@ spec:
- containerPort: 9999
name: op-metrics
- name: metrics-exporter
- image: altinity/metrics-exporter:0.25.3
+ image: altinity/metrics-exporter:0.25.4
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -5761,7 +5903,7 @@ metadata:
name: clickhouse-operator-metrics
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
spec:
ports:
diff --git a/deploy/operator/clickhouse-operator-install-bundle.yaml b/deploy/operator/clickhouse-operator-install-bundle.yaml
index a7ad86853..72ca35c50 100644
--- a/deploy/operator/clickhouse-operator-install-bundle.yaml
+++ b/deploy/operator/clickhouse-operator-install-bundle.yaml
@@ -4,14 +4,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -358,9 +358,9 @@ spec:
description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
# nullable: true
x-kubernetes-preserve-unknown-fields: true
- reconciling:
+ reconciling: &TypeReconcile
type: object
- description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
# nullable: true
properties:
policy:
@@ -433,20 +433,6 @@ spec:
service:
<<: *TypeObjectsCleanup
description: "Behavior policy for failed Service, `Retain` by default"
- runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
macros:
type: object
description: "macros parameters"
@@ -490,6 +476,72 @@ spec:
enabled:
<<: *TypeStringBool
description: "enabled or not"
+ runtime: &TypeReconcileRuntime
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host: &TypeReconcileHost
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectfully before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ <<: *TypeStringBool
+ queries:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait for during host launch procedure"
+ properties:
+ startup:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is not to wait.
+ readiness:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ reconcile:
+ <<: *TypeReconcile
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
defaults:
type: object
description: |
@@ -620,6 +672,9 @@ spec:
identity:
type: string
description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ !!merge <<: *TypeStringBool
+ description: "Enables compression in Keeper protocol if set to true"
users:
type: object
description: |
@@ -821,19 +876,9 @@ spec:
description: "allow tuning reconciling process"
properties:
runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ <<: *TypeReconcileRuntime
+ host:
+ <<: *TypeReconcileHost
layout:
type: object
description: |
@@ -1120,7 +1165,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1376,7 +1421,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
@@ -1391,14 +1436,14 @@ spec:
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -1745,9 +1790,9 @@ spec:
description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
# nullable: true
x-kubernetes-preserve-unknown-fields: true
- reconciling:
+ reconciling: &TypeReconcile
type: object
- description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
# nullable: true
properties:
policy:
@@ -1820,20 +1865,6 @@ spec:
service:
<<: *TypeObjectsCleanup
description: "Behavior policy for failed Service, `Retain` by default"
- runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
macros:
type: object
description: "macros parameters"
@@ -1877,6 +1908,72 @@ spec:
enabled:
<<: *TypeStringBool
description: "enabled or not"
+ runtime: &TypeReconcileRuntime
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host: &TypeReconcileHost
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectfully before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ <<: *TypeStringBool
+ queries:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait for during host launch procedure"
+ properties:
+ startup:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is not to wait.
+ readiness:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ reconcile:
+ <<: *TypeReconcile
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
defaults:
type: object
description: |
@@ -2007,6 +2104,9 @@ spec:
identity:
type: string
description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ !!merge <<: *TypeStringBool
+ description: "Enables compression in Keeper protocol if set to true"
users:
type: object
description: |
@@ -2208,19 +2308,9 @@ spec:
description: "allow tuning reconciling process"
properties:
runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ <<: *TypeReconcileRuntime
+ host:
+ <<: *TypeReconcileHost
layout:
type: object
description: |
@@ -2507,7 +2597,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -2763,7 +2853,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
@@ -2781,7 +2871,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -2827,10 +2917,9 @@ spec:
description: "Parameters for watch kubernetes resources which used by clickhouse-operator deployment"
properties:
namespaces:
- type: array
+ type: object
description: "List of namespaces where clickhouse-operator watches for events."
- items:
- type: string
+ x-kubernetes-preserve-unknown-fields: true
clickhouse:
type: object
description: "Clickhouse related parameters used by clickhouse-operator"
@@ -3101,7 +3190,6 @@ spec:
- to complete all running queries
- to be included into a ClickHouse cluster
respectfully before moving forward
-
properties:
wait:
type: object
@@ -3153,6 +3241,22 @@ spec:
delay:
type: integer
description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait for during host launch procedure"
+ properties:
+ startup:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is not to wait.
+ readiness:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
annotation:
type: object
description: "defines which metadata.annotations items will include or exclude during render StatefulSet, Pod, PVC resources"
@@ -3285,14 +3389,14 @@ spec:
---
# Template Parameters:
#
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.25.3
+ clickhouse-keeper.altinity.com/chop: 0.25.4
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
@@ -3961,7 +4065,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -4183,7 +4287,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
---
# Template Parameters:
#
@@ -4209,7 +4313,7 @@ metadata:
name: clickhouse-operator-kube-system
#namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
rules:
#
@@ -4329,6 +4433,19 @@ rules:
- create
- delete
+ #
+ # discovery.* resources
+ #
+
+ - apiGroups:
+ - discovery.k8s.io
+ resources:
+ - endpointslices
+ verbs:
+ - get
+ - list
+ - watch
+
#
# apiextensions
#
@@ -4428,7 +4545,7 @@ metadata:
name: clickhouse-operator-kube-system
#namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -4462,7 +4579,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
rules:
#
@@ -4582,6 +4699,19 @@ rules:
- create
- delete
+ #
+ # discovery.* resources
+ #
+
+ - apiGroups:
+ - discovery.k8s.io
+ resources:
+ - endpointslices
+ verbs:
+ - get
+ - list
+ - watch
+
#
# apiextensions
#
@@ -4681,7 +4811,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -4703,7 +4833,7 @@ metadata:
name: etc-clickhouse-operator-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
config.yaml: |
@@ -4732,7 +4862,6 @@ data:
# Concurrently running operators should watch on different namespaces.
# IMPORTANT
# Regexp is applicable.
- #namespaces: ["dev", "test"]
namespaces: []
clickhouse:
@@ -5065,15 +5194,31 @@ data:
# - to be excluded from a ClickHouse cluster
# - to complete all running queries
# - to be included into a ClickHouse cluster
- # respectfully before moving forward
+ # respectively before moving forward with host reconcile
wait:
exclude: true
queries: true
include: false
+ # The operator during reconcile procedure should wait for replicas to catch-up
+ # replication delay a.k.a replication lag for the following replicas
replicas:
+ # All replicas (new and known earlier) are explicitly requested to wait for replication to catch-up
all: no
+ # New replicas only are requested to wait for replication to catch-up
new: yes
+ # Replication catch-up is considered to be completed as soon as replication delay
+ # a.k.a replication lag - calculated as "MAX(absolute_delay) FROM system.replicas"
+ # is within this specified delay (in seconds)
delay: 10
+ probes:
+ # Whether the operator during host launch procedure should wait for startup probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+ # Default option value is not to wait.
+ startup: no
+ # Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+ # Default option value is to wait.
+ readiness: yes
################################################
##
@@ -5186,7 +5331,7 @@ metadata:
name: etc-clickhouse-operator-confd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
---
@@ -5202,7 +5347,7 @@ metadata:
name: etc-clickhouse-operator-configd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
01-clickhouse-01-listen.xml: |
@@ -5301,7 +5446,7 @@ metadata:
name: etc-clickhouse-operator-templatesd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
001-templates.json.example: |
@@ -5401,7 +5546,7 @@ metadata:
name: etc-clickhouse-operator-usersd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
01-clickhouse-operator-profile.xml: |
@@ -5464,7 +5609,7 @@ metadata:
name: etc-keeper-operator-confd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
---
@@ -5480,7 +5625,7 @@ metadata:
name: etc-keeper-operator-configd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
01-keeper-01-default-config.xml: |
@@ -5558,7 +5703,7 @@ metadata:
name: etc-keeper-operator-templatesd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
readme: |
@@ -5576,7 +5721,7 @@ metadata:
name: etc-keeper-operator-usersd-files
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
---
@@ -5584,7 +5729,7 @@ data:
# Template parameters available:
# NAMESPACE=kube-system
# COMMENT=
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
# CH_PASSWORD_SECRET_PLAIN=clickhouse_operator_password
#
@@ -5594,7 +5739,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
type: Opaque
stringData:
@@ -5605,9 +5750,9 @@ stringData:
#
# NAMESPACE=kube-system
# COMMENT=
-# OPERATOR_IMAGE=altinity/clickhouse-operator:0.25.3
+# OPERATOR_IMAGE=altinity/clickhouse-operator:0.25.4
# OPERATOR_IMAGE_PULL_POLICY=Always
-# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.25.3
+# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.25.4
# METRICS_EXPORTER_IMAGE_PULL_POLICY=Always
#
# Setup Deployment for clickhouse-operator
@@ -5618,7 +5763,7 @@ metadata:
name: clickhouse-operator
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
spec:
replicas: 1
@@ -5666,7 +5811,7 @@ spec:
name: etc-keeper-operator-usersd-files
containers:
- name: clickhouse-operator
- image: altinity/clickhouse-operator:0.25.3
+ image: altinity/clickhouse-operator:0.25.4
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -5742,7 +5887,7 @@ spec:
name: op-metrics
- name: metrics-exporter
- image: altinity/metrics-exporter:0.25.3
+ image: altinity/metrics-exporter:0.25.4
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -5833,7 +5978,7 @@ metadata:
name: clickhouse-operator-metrics
namespace: kube-system
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
spec:
ports:
diff --git a/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml b/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml
index 16ffaba03..85d464dfc 100644
--- a/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml
+++ b/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml
@@ -4,14 +4,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -353,9 +353,9 @@ spec:
description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
# nullable: true
x-kubernetes-preserve-unknown-fields: true
- reconciling:
+ reconciling: &TypeReconcile
type: object
- description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
# nullable: true
properties:
policy:
@@ -428,20 +428,6 @@ spec:
service:
!!merge <<: *TypeObjectsCleanup
description: "Behavior policy for failed Service, `Retain` by default"
- runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
macros:
type: object
description: "macros parameters"
@@ -485,6 +471,72 @@ spec:
enabled:
!!merge <<: *TypeStringBool
description: "enabled or not"
+ runtime: &TypeReconcileRuntime
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host: &TypeReconcileHost
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectively before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ !!merge <<: *TypeStringBool
+ queries:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait for during host launch procedure"
+ properties:
+ startup:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is not to wait.
+ readiness:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ reconcile:
+ !!merge <<: *TypeReconcile
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
defaults:
type: object
description: |
@@ -615,6 +667,9 @@ spec:
identity:
type: string
description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ !!merge <<: *TypeStringBool
+ description: "Enables compression in Keeper protocol if set to true"
users:
type: object
description: |
@@ -813,19 +868,9 @@ spec:
description: "allow tuning reconciling process"
properties:
runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ !!merge <<: *TypeReconcileRuntime
+ host:
+ !!merge <<: *TypeReconcileHost
layout:
type: object
description: |
@@ -1112,7 +1157,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1366,7 +1411,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
@@ -1381,14 +1426,14 @@ spec:
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -1728,9 +1773,9 @@ spec:
description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
# nullable: true
x-kubernetes-preserve-unknown-fields: true
- reconciling:
+ reconciling: &TypeReconcile
type: object
- description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
# nullable: true
properties:
policy:
@@ -1803,20 +1848,6 @@ spec:
service:
!!merge <<: *TypeObjectsCleanup
description: "Behavior policy for failed Service, `Retain` by default"
- runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
macros:
type: object
description: "macros parameters"
@@ -1860,6 +1891,72 @@ spec:
enabled:
!!merge <<: *TypeStringBool
description: "enabled or not"
+ runtime: &TypeReconcileRuntime
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host: &TypeReconcileHost
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectively before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ !!merge <<: *TypeStringBool
+ queries:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait for during host launch procedure"
+ properties:
+ startup:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is not to wait.
+ readiness:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ reconcile:
+ !!merge <<: *TypeReconcile
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
defaults:
type: object
description: |
@@ -1990,6 +2087,9 @@ spec:
identity:
type: string
description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ !!merge <<: *TypeStringBool
+ description: "Enables compression in Keeper protocol if set to true"
users:
type: object
description: |
@@ -2188,19 +2288,9 @@ spec:
description: "allow tuning reconciling process"
properties:
runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ !!merge <<: *TypeReconcileRuntime
+ host:
+ !!merge <<: *TypeReconcileHost
layout:
type: object
description: |
@@ -2487,7 +2577,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -2741,7 +2831,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
@@ -2759,7 +2849,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -2801,10 +2891,9 @@ spec:
description: "Parameters for watch kubernetes resources which used by clickhouse-operator deployment"
properties:
namespaces:
- type: array
+ type: object
description: "List of namespaces where clickhouse-operator watches for events."
- items:
- type: string
+ x-kubernetes-preserve-unknown-fields: true
clickhouse:
type: object
description: "Clickhouse related parameters used by clickhouse-operator"
@@ -3126,6 +3215,22 @@ spec:
delay:
type: integer
description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait for during host launch procedure"
+ properties:
+ startup:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is not to wait.
+ readiness:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
annotation:
type: object
description: "defines which metadata.annotations items will include or exclude during render StatefulSet, Pod, PVC resources"
@@ -3253,14 +3358,14 @@ spec:
---
# Template Parameters:
#
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.25.3
+ clickhouse-keeper.altinity.com/chop: 0.25.4
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
@@ -3928,7 +4033,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -4148,7 +4253,7 @@ metadata:
name: clickhouse-operator
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
# Template Parameters:
#
@@ -4173,7 +4278,7 @@ metadata:
name: clickhouse-operator-${OPERATOR_NAMESPACE}
#namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
rules:
#
# Core API group
@@ -4287,6 +4392,17 @@ rules:
- create
- delete
#
+ # discovery.* resources
+ #
+ - apiGroups:
+ - discovery.k8s.io
+ resources:
+ - endpointslices
+ verbs:
+ - get
+ - list
+ - watch
+ #
# apiextensions
#
- apiGroups:
@@ -4382,7 +4498,7 @@ metadata:
name: clickhouse-operator-${OPERATOR_NAMESPACE}
#namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -4404,7 +4520,7 @@ metadata:
name: etc-clickhouse-operator-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
config.yaml: |
@@ -4433,7 +4549,6 @@ data:
# Concurrently running operators should watch on different namespaces.
# IMPORTANT
# Regexp is applicable.
- #namespaces: ["dev", "test"]
namespaces: []
clickhouse:
@@ -4766,15 +4881,31 @@ data:
# - to be excluded from a ClickHouse cluster
# - to complete all running queries
# - to be included into a ClickHouse cluster
- # respectfully before moving forward
+ # respectively before moving forward with host reconcile
wait:
exclude: true
queries: true
include: false
+ # The operator during reconcile procedure should wait for replicas to catch-up
+ # replication delay a.k.a replication lag for the following replicas
replicas:
+ # All replicas (new and known earlier) are explicitly requested to wait for replication to catch-up
all: no
+ # New replicas only are requested to wait for replication to catch-up
new: yes
+ # Replication catch-up is considered to be completed as soon as replication delay
+ # a.k.a replication lag - calculated as "MAX(absolute_delay) FROM system.replicas"
+ # is within this specified delay (in seconds)
delay: 10
+ probes:
+ # Whether the operator during host launch procedure should wait for startup probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+ # Default option value is not to wait.
+ startup: no
+ # Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+ # Default option value is to wait.
+ readiness: yes
################################################
##
@@ -4886,7 +5017,7 @@ metadata:
name: etc-clickhouse-operator-confd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
---
@@ -4902,7 +5033,7 @@ metadata:
name: etc-clickhouse-operator-configd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
01-clickhouse-01-listen.xml: |
@@ -4996,7 +5127,7 @@ metadata:
name: etc-clickhouse-operator-templatesd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
001-templates.json.example: |
@@ -5094,7 +5225,7 @@ metadata:
name: etc-clickhouse-operator-usersd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
01-clickhouse-operator-profile.xml: |
@@ -5156,7 +5287,7 @@ metadata:
name: etc-keeper-operator-confd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
---
@@ -5172,7 +5303,7 @@ metadata:
name: etc-keeper-operator-configd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
01-keeper-01-default-config.xml: |
@@ -5247,7 +5378,7 @@ metadata:
name: etc-keeper-operator-templatesd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
readme: |
@@ -5265,7 +5396,7 @@ metadata:
name: etc-keeper-operator-usersd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
---
@@ -5273,7 +5404,7 @@ data:
# Template parameters available:
# NAMESPACE=${OPERATOR_NAMESPACE}
# COMMENT=
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
# CH_PASSWORD_SECRET_PLAIN=clickhouse_operator_password
#
@@ -5283,7 +5414,7 @@ metadata:
name: clickhouse-operator
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
type: Opaque
stringData:
@@ -5307,7 +5438,7 @@ metadata:
name: clickhouse-operator
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
spec:
replicas: 1
@@ -5519,7 +5650,7 @@ metadata:
name: clickhouse-operator-metrics
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
spec:
ports:
diff --git a/deploy/operator/clickhouse-operator-install-template.yaml b/deploy/operator/clickhouse-operator-install-template.yaml
index 3ea78fed6..f1319b629 100644
--- a/deploy/operator/clickhouse-operator-install-template.yaml
+++ b/deploy/operator/clickhouse-operator-install-template.yaml
@@ -4,14 +4,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -358,9 +358,9 @@ spec:
description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
# nullable: true
x-kubernetes-preserve-unknown-fields: true
- reconciling:
+ reconciling: &TypeReconcile
type: object
- description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
# nullable: true
properties:
policy:
@@ -433,20 +433,6 @@ spec:
service:
<<: *TypeObjectsCleanup
description: "Behavior policy for failed Service, `Retain` by default"
- runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
macros:
type: object
description: "macros parameters"
@@ -490,6 +476,72 @@ spec:
enabled:
<<: *TypeStringBool
description: "enabled or not"
+ runtime: &TypeReconcileRuntime
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host: &TypeReconcileHost
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectively before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ <<: *TypeStringBool
+ queries:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait for during host launch procedure"
+ properties:
+ startup:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is not to wait.
+ readiness:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ reconcile:
+ <<: *TypeReconcile
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
defaults:
type: object
description: |
@@ -620,6 +672,9 @@ spec:
identity:
type: string
description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ !!merge <<: *TypeStringBool
+ description: "Enables compression in Keeper protocol if set to true"
users:
type: object
description: |
@@ -821,19 +876,9 @@ spec:
description: "allow tuning reconciling process"
properties:
runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ <<: *TypeReconcileRuntime
+ host:
+ <<: *TypeReconcileHost
layout:
type: object
description: |
@@ -1120,7 +1165,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1376,7 +1421,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
@@ -1391,14 +1436,14 @@ spec:
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -1745,9 +1790,9 @@ spec:
description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
# nullable: true
x-kubernetes-preserve-unknown-fields: true
- reconciling:
+ reconciling: &TypeReconcile
type: object
- description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
# nullable: true
properties:
policy:
@@ -1820,20 +1865,6 @@ spec:
service:
<<: *TypeObjectsCleanup
description: "Behavior policy for failed Service, `Retain` by default"
- runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
macros:
type: object
description: "macros parameters"
@@ -1877,6 +1908,72 @@ spec:
enabled:
<<: *TypeStringBool
description: "enabled or not"
+ runtime: &TypeReconcileRuntime
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host: &TypeReconcileHost
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+                    respectively before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ <<: *TypeStringBool
+ queries:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+                description: "What probes the operator should wait for during host launch procedure"
+ properties:
+ startup:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+                      Default option value is not to wait.
+ readiness:
+ <<: *TypeStringBool
+ description: |
+                      Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ reconcile:
+ <<: *TypeReconcile
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
defaults:
type: object
description: |
@@ -2007,6 +2104,9 @@ spec:
identity:
type: string
description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ !!merge <<: *TypeStringBool
+ description: "Enables compression in Keeper protocol if set to true"
users:
type: object
description: |
@@ -2208,19 +2308,9 @@ spec:
description: "allow tuning reconciling process"
properties:
runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ <<: *TypeReconcileRuntime
+ host:
+ <<: *TypeReconcileHost
layout:
type: object
description: |
@@ -2507,7 +2597,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -2763,7 +2853,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
@@ -2781,7 +2871,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -2827,10 +2917,9 @@ spec:
description: "Parameters for watch kubernetes resources which used by clickhouse-operator deployment"
properties:
namespaces:
- type: array
+ type: object
description: "List of namespaces where clickhouse-operator watches for events."
- items:
- type: string
+ x-kubernetes-preserve-unknown-fields: true
clickhouse:
type: object
description: "Clickhouse related parameters used by clickhouse-operator"
@@ -3101,7 +3190,6 @@ spec:
- to complete all running queries
- to be included into a ClickHouse cluster
respectfully before moving forward
-
properties:
wait:
type: object
@@ -3153,6 +3241,22 @@ spec:
delay:
type: integer
description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+                    description: "What probes the operator should wait for during host launch procedure"
+ properties:
+ startup:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+                          Default option value is not to wait.
+ readiness:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
annotation:
type: object
description: "defines which metadata.annotations items will include or exclude during render StatefulSet, Pod, PVC resources"
@@ -3285,14 +3389,14 @@ spec:
---
# Template Parameters:
#
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.25.3
+ clickhouse-keeper.altinity.com/chop: 0.25.4
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
@@ -3961,7 +4065,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -4183,7 +4287,7 @@ metadata:
name: clickhouse-operator
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
---
# Template Parameters:
#
@@ -4209,7 +4313,7 @@ metadata:
name: clickhouse-operator-${OPERATOR_NAMESPACE}
#namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
rules:
#
@@ -4329,6 +4433,19 @@ rules:
- create
- delete
+ #
+ # discovery.* resources
+ #
+
+ - apiGroups:
+ - discovery.k8s.io
+ resources:
+ - endpointslices
+ verbs:
+ - get
+ - list
+ - watch
+
#
# apiextensions
#
@@ -4428,7 +4545,7 @@ metadata:
name: clickhouse-operator-${OPERATOR_NAMESPACE}
#namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -4450,7 +4567,7 @@ metadata:
name: etc-clickhouse-operator-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
config.yaml: |
@@ -4479,7 +4596,6 @@ data:
# Concurrently running operators should watch on different namespaces.
# IMPORTANT
# Regexp is applicable.
- #namespaces: ["dev", "test"]
namespaces: []
clickhouse:
@@ -4812,15 +4928,31 @@ data:
# - to be excluded from a ClickHouse cluster
# - to complete all running queries
# - to be included into a ClickHouse cluster
- # respectfully before moving forward
+      # respectively before moving forward with host reconcile
wait:
exclude: true
queries: true
include: false
+ # The operator during reconcile procedure should wait for replicas to catch-up
+ # replication delay a.k.a replication lag for the following replicas
replicas:
+ # All replicas (new and known earlier) are explicitly requested to wait for replication to catch-up
all: no
+ # New replicas only are requested to wait for replication to catch-up
new: yes
+ # Replication catch-up is considered to be completed as soon as replication delay
+ # a.k.a replication lag - calculated as "MAX(absolute_delay) FROM system.replicas"
+ # is within this specified delay (in seconds)
delay: 10
+ probes:
+ # Whether the operator during host launch procedure should wait for startup probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+        # Default option value is not to wait.
+ startup: no
+ # Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+ # Default option value is to wait.
+ readiness: yes
################################################
##
@@ -4933,7 +5065,7 @@ metadata:
name: etc-clickhouse-operator-confd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
---
@@ -4949,7 +5081,7 @@ metadata:
name: etc-clickhouse-operator-configd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
01-clickhouse-01-listen.xml: |
@@ -5048,7 +5180,7 @@ metadata:
name: etc-clickhouse-operator-templatesd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
001-templates.json.example: |
@@ -5148,7 +5280,7 @@ metadata:
name: etc-clickhouse-operator-usersd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
01-clickhouse-operator-profile.xml: |
@@ -5211,7 +5343,7 @@ metadata:
name: etc-keeper-operator-confd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
---
@@ -5227,7 +5359,7 @@ metadata:
name: etc-keeper-operator-configd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
01-keeper-01-default-config.xml: |
@@ -5305,7 +5437,7 @@ metadata:
name: etc-keeper-operator-templatesd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
readme: |
@@ -5323,7 +5455,7 @@ metadata:
name: etc-keeper-operator-usersd-files
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
---
@@ -5331,7 +5463,7 @@ data:
# Template parameters available:
# NAMESPACE=${OPERATOR_NAMESPACE}
# COMMENT=
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
# CH_PASSWORD_SECRET_PLAIN=clickhouse_operator_password
#
@@ -5341,7 +5473,7 @@ metadata:
name: clickhouse-operator
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
type: Opaque
stringData:
@@ -5365,7 +5497,7 @@ metadata:
name: clickhouse-operator
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
spec:
replicas: 1
@@ -5580,7 +5712,7 @@ metadata:
name: clickhouse-operator-metrics
namespace: ${OPERATOR_NAMESPACE}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
spec:
ports:
diff --git a/deploy/operator/clickhouse-operator-install-tf.yaml b/deploy/operator/clickhouse-operator-install-tf.yaml
index 51139efe4..e159b56aa 100644
--- a/deploy/operator/clickhouse-operator-install-tf.yaml
+++ b/deploy/operator/clickhouse-operator-install-tf.yaml
@@ -11,14 +11,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -365,9 +365,9 @@ spec:
description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
# nullable: true
x-kubernetes-preserve-unknown-fields: true
- reconciling:
+ reconciling: &TypeReconcile
type: object
- description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
# nullable: true
properties:
policy:
@@ -440,20 +440,6 @@ spec:
service:
<<: *TypeObjectsCleanup
description: "Behavior policy for failed Service, `Retain` by default"
- runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
macros:
type: object
description: "macros parameters"
@@ -497,6 +483,72 @@ spec:
enabled:
<<: *TypeStringBool
description: "enabled or not"
+ runtime: &TypeReconcileRuntime
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host: &TypeReconcileHost
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+                    respectively before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ <<: *TypeStringBool
+ queries:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+                description: "What probes the operator should wait for during host launch procedure"
+ properties:
+ startup:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+                      Default option value is not to wait.
+ readiness:
+ <<: *TypeStringBool
+ description: |
+                      Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ reconcile:
+ <<: *TypeReconcile
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
defaults:
type: object
description: |
@@ -627,6 +679,9 @@ spec:
identity:
type: string
description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ !!merge <<: *TypeStringBool
+ description: "Enables compression in Keeper protocol if set to true"
users:
type: object
description: |
@@ -828,19 +883,9 @@ spec:
description: "allow tuning reconciling process"
properties:
runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ <<: *TypeReconcileRuntime
+ host:
+ <<: *TypeReconcileHost
layout:
type: object
description: |
@@ -1127,7 +1172,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1383,7 +1428,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
@@ -1398,14 +1443,14 @@ spec:
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -1752,9 +1797,9 @@ spec:
description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
# nullable: true
x-kubernetes-preserve-unknown-fields: true
- reconciling:
+ reconciling: &TypeReconcile
type: object
- description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
# nullable: true
properties:
policy:
@@ -1827,20 +1872,6 @@ spec:
service:
<<: *TypeObjectsCleanup
description: "Behavior policy for failed Service, `Retain` by default"
- runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
macros:
type: object
description: "macros parameters"
@@ -1884,6 +1915,72 @@ spec:
enabled:
<<: *TypeStringBool
description: "enabled or not"
+ runtime: &TypeReconcileRuntime
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host: &TypeReconcileHost
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+                    respectively before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ <<: *TypeStringBool
+ queries:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+                description: "What probes the operator should wait for during host launch procedure"
+ properties:
+ startup:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+                      Default option value is not to wait.
+ readiness:
+ <<: *TypeStringBool
+ description: |
+                      Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ reconcile:
+ <<: *TypeReconcile
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
defaults:
type: object
description: |
@@ -2014,6 +2111,9 @@ spec:
identity:
type: string
description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ !!merge <<: *TypeStringBool
+ description: "Enables compression in Keeper protocol if set to true"
users:
type: object
description: |
@@ -2215,19 +2315,9 @@ spec:
description: "allow tuning reconciling process"
properties:
runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ <<: *TypeReconcileRuntime
+ host:
+ <<: *TypeReconcileHost
layout:
type: object
description: |
@@ -2514,7 +2604,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -2770,7 +2860,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
@@ -2788,7 +2878,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -2834,10 +2924,9 @@ spec:
description: "Parameters for watch kubernetes resources which used by clickhouse-operator deployment"
properties:
namespaces:
- type: array
+ type: object
description: "List of namespaces where clickhouse-operator watches for events."
- items:
- type: string
+ x-kubernetes-preserve-unknown-fields: true
clickhouse:
type: object
description: "Clickhouse related parameters used by clickhouse-operator"
@@ -3108,7 +3197,6 @@ spec:
- to complete all running queries
- to be included into a ClickHouse cluster
respectfully before moving forward
-
properties:
wait:
type: object
@@ -3160,6 +3248,22 @@ spec:
delay:
type: integer
description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+                    description: "What probes the operator should wait for during host launch procedure"
+ properties:
+ startup:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+                          Default option value is not to wait.
+ readiness:
+ <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
annotation:
type: object
description: "defines which metadata.annotations items will include or exclude during render StatefulSet, Pod, PVC resources"
@@ -3292,14 +3396,14 @@ spec:
---
# Template Parameters:
#
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.25.3
+ clickhouse-keeper.altinity.com/chop: 0.25.4
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
@@ -3968,7 +4072,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -4190,7 +4294,7 @@ metadata:
name: clickhouse-operator
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
---
# Template Parameters:
#
@@ -4216,7 +4320,7 @@ metadata:
name: clickhouse-operator
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
rules:
#
@@ -4336,6 +4440,19 @@ rules:
- create
- delete
+ #
+ # discovery.* resources
+ #
+
+ - apiGroups:
+ - discovery.k8s.io
+ resources:
+ - endpointslices
+ verbs:
+ - get
+ - list
+ - watch
+
#
# apiextensions
#
@@ -4435,7 +4552,7 @@ metadata:
name: clickhouse-operator
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -4457,7 +4574,7 @@ metadata:
name: etc-clickhouse-operator-files
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
config.yaml: |
@@ -4486,7 +4603,6 @@ data:
# Concurrently running operators should watch on different namespaces.
# IMPORTANT
# Regexp is applicable.
- #namespaces: ["dev", "test"]
namespaces: [${namespace}]
clickhouse:
@@ -4819,15 +4935,31 @@ data:
# - to be excluded from a ClickHouse cluster
# - to complete all running queries
# - to be included into a ClickHouse cluster
- # respectfully before moving forward
+ # respectively before moving forward with host reconcile
wait:
exclude: true
queries: true
include: false
+ # The operator during reconcile procedure should wait for replicas to catch-up
+ # replication delay a.k.a replication lag for the following replicas
replicas:
+ # All replicas (new and known earlier) are explicitly requested to wait for replication to catch-up
all: no
+ # New replicas only are requested to wait for replication to catch-up
new: yes
+ # Replication catch-up is considered to be completed as soon as replication delay
+ # a.k.a replication lag - calculated as "MAX(absolute_delay) FROM system.replicas"
+ # is within this specified delay (in seconds)
delay: 10
+ probes:
+ # Whether the operator during host launch procedure should wait for startup probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+ # Default option value is not to wait.
+ startup: no
+ # Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+ # Default option value is to wait.
+ readiness: yes
################################################
##
@@ -4940,7 +5072,7 @@ metadata:
name: etc-clickhouse-operator-confd-files
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
---
@@ -4956,7 +5088,7 @@ metadata:
name: etc-clickhouse-operator-configd-files
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
01-clickhouse-01-listen.xml: |
@@ -5055,7 +5187,7 @@ metadata:
name: etc-clickhouse-operator-templatesd-files
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
001-templates.json.example: |
@@ -5155,7 +5287,7 @@ metadata:
name: etc-clickhouse-operator-usersd-files
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
01-clickhouse-operator-profile.xml: |
@@ -5218,7 +5350,7 @@ metadata:
name: etc-keeper-operator-confd-files
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
---
@@ -5234,7 +5366,7 @@ metadata:
name: etc-keeper-operator-configd-files
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
01-keeper-01-default-config.xml: |
@@ -5312,7 +5444,7 @@ metadata:
name: etc-keeper-operator-templatesd-files
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
readme: |
@@ -5330,7 +5462,7 @@ metadata:
name: etc-keeper-operator-usersd-files
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
data:
---
@@ -5338,7 +5470,7 @@ data:
# Template parameters available:
# NAMESPACE=${namespace}
# COMMENT=
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
# CH_PASSWORD_SECRET_PLAIN=${password}
#
@@ -5348,7 +5480,7 @@ metadata:
name: clickhouse-operator
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
type: Opaque
stringData:
@@ -5359,9 +5491,9 @@ stringData:
#
# NAMESPACE=${namespace}
# COMMENT=
-# OPERATOR_IMAGE=altinity/clickhouse-operator:0.25.3
+# OPERATOR_IMAGE=altinity/clickhouse-operator:0.25.4
# OPERATOR_IMAGE_PULL_POLICY=Always
-# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.25.3
+# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.25.4
# METRICS_EXPORTER_IMAGE_PULL_POLICY=Always
#
# Setup Deployment for clickhouse-operator
@@ -5372,7 +5504,7 @@ metadata:
name: clickhouse-operator
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
spec:
replicas: 1
@@ -5420,7 +5552,7 @@ spec:
name: etc-keeper-operator-usersd-files
containers:
- name: clickhouse-operator
- image: altinity/clickhouse-operator:0.25.3
+ image: altinity/clickhouse-operator:0.25.4
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -5496,7 +5628,7 @@ spec:
name: op-metrics
- name: metrics-exporter
- image: altinity/metrics-exporter:0.25.3
+ image: altinity/metrics-exporter:0.25.4
imagePullPolicy: Always
volumeMounts:
- name: etc-clickhouse-operator-folder
@@ -5587,7 +5719,7 @@ metadata:
name: clickhouse-operator-metrics
namespace: ${namespace}
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
app: clickhouse-operator
spec:
ports:
diff --git a/deploy/operator/parts/crd.yaml b/deploy/operator/parts/crd.yaml
index a8bde5a91..8f7f599f4 100644
--- a/deploy/operator/parts/crd.yaml
+++ b/deploy/operator/parts/crd.yaml
@@ -4,14 +4,14 @@
# SINGULAR=clickhouseinstallation
# PLURAL=clickhouseinstallations
# SHORT=chi
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -410,7 +410,7 @@ spec:
x-kubernetes-preserve-unknown-fields: true
reconciling:
type: object
- description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
# nullable: true
properties:
policy:
@@ -517,20 +517,6 @@ spec:
- "Retain"
- "Delete"
description: "Behavior policy for failed Service, `Retain` by default"
- runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
macros:
type: object
description: "macros parameters"
@@ -699,271 +685,1062 @@ spec:
- "Enabled"
- "enabled"
description: "enabled or not"
- defaults:
- type: object
- description: |
- define default behavior for whole ClickHouseInstallation, some behavior can be re-define on cluster, shard and replica level
- More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults
- # nullable: true
- properties:
- replicasUseFQDN:
- type: string
- enum:
- # List StringBoolXXX constants from model
- - ""
- - "0"
- - "1"
- - "False"
- - "false"
- - "True"
- - "true"
- - "No"
- - "no"
- - "Yes"
- - "yes"
- - "Off"
- - "off"
- - "On"
- - "on"
- - "Disable"
- - "disable"
- - "Enable"
- - "enable"
- - "Disabled"
- - "disabled"
- - "Enabled"
- - "enabled"
- description: |
- define should replicas be specified by FQDN in ``.
- In case of "no" will use short hostname and clickhouse-server will use kubernetes default suffixes for DNS lookup
- "no" by default
- distributedDDL:
- type: object
- description: |
- allows change `` settings
- More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl
- # nullable: true
- properties:
- profile:
- type: string
- description: "Settings from this profile will be used to execute DDL queries"
- storageManagement:
- type: object
- description: default storage management options
- properties:
- provisioner:
- type: string
- description: "defines `PVC` provisioner - be it StatefulSet or the Operator"
- enum:
- - ""
- - "StatefulSet"
- - "Operator"
- reclaimPolicy:
- type: string
- description: |
- defines behavior of `PVC` deletion.
- `Delete` by default, if `Retain` specified then `PVC` will be kept when deleting StatefulSet
- enum:
- - ""
- - "Retain"
- - "Delete"
- templates:
- type: object
- description: "optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource"
- # nullable: true
- properties:
- hostTemplate:
- type: string
- description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`"
- podTemplate:
- type: string
- description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
- dataVolumeClaimTemplate:
- type: string
- description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
- logVolumeClaimTemplate:
- type: string
- description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
- serviceTemplate:
- type: string
- description: "optional, template name from chi.spec.templates.serviceTemplates. used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
- serviceTemplates:
- type: array
- description: "optional, template names from chi.spec.templates.serviceTemplates. used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
- nullable: true
- items:
- type: string
- clusterServiceTemplate:
- type: string
- description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`"
- shardServiceTemplate:
- type: string
- description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`"
- replicaServiceTemplate:
- type: string
- description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`"
- volumeClaimTemplate:
- type: string
- description: "optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
- configuration:
- type: object
- description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource"
- # nullable: true
- properties:
- zookeeper:
+ runtime:
type: object
- description: |
- allows configure .. section in each `Pod` during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/`
- `clickhouse-operator` itself doesn't manage Zookeeper, please install Zookeeper separatelly look examples on https://github.com/Altinity/clickhouse-operator/tree/master/deploy/zookeeper/
- currently, zookeeper (or clickhouse-keeper replacement) used for *ReplicatedMergeTree table engines and for `distributed_ddl`
- More details: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings_zookeeper
- # nullable: true
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
properties:
- nodes:
- type: array
- description: "describe every available zookeeper cluster node for interaction"
- # nullable: true
- items:
- type: object
- #required:
- # - host
- properties:
- host:
- type: string
- description: "dns name or ip address for Zookeeper node"
- port:
- type: integer
- description: "TCP port which used to connect to Zookeeper node"
- minimum: 0
- maximum: 65535
- secure:
- type: string
- enum:
- # List StringBoolXXX constants from model
- - ""
- - "0"
- - "1"
- - "False"
- - "false"
- - "True"
- - "true"
- - "No"
- - "no"
- - "Yes"
- - "yes"
- - "Off"
- - "off"
- - "On"
- - "on"
- - "Disable"
- - "disable"
- - "Enable"
- - "enable"
- - "Disabled"
- - "disabled"
- - "Enabled"
- - "enabled"
- description: "if a secure connection to Zookeeper is required"
- availabilityZone:
- type: string
- description: "availability zone for Zookeeper node"
- session_timeout_ms:
+ reconcileShardsThreadsNumber:
type: integer
- description: "session timeout during connect to Zookeeper"
- operation_timeout_ms:
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
type: integer
- description: "one operation timeout during Zookeeper transactions"
- root:
- type: string
- description: "optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)"
- identity:
- type: string
- description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
- users:
- type: object
- description: |
- allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
- you can configure password hashed, authorization restrictions, database level security row filters etc.
- More details: https://clickhouse.tech/docs/en/operations/settings/settings-users/
- Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationusers
-
- any key could contains `valueFrom` with `secretKeyRef` which allow pass password from kubernetes secrets
- secret value will pass in `pod.spec.containers.evn`, and generate with from_env=XXX in XML in /etc/clickhouse-server/users.d/chop-generated-users.xml
- it not allow automatically updates when updates `secret`, change spec.taskID for manually trigger reconcile cycle
-
- look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
-
- any key with prefix `k8s_secret_` shall has value with format namespace/secret/key or secret/key
- in this case value from secret will write directly into XML tag during render *-usersd ConfigMap
-
- any key with prefix `k8s_secret_env` shall has value with format namespace/secret/key or secret/key
- in this case value from secret will write into environment variable and write to XML tag via from_env=XXX
-
- look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
- # nullable: true
- x-kubernetes-preserve-unknown-fields: true
- profiles:
- type: object
- description: |
- allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
- you can configure any aspect of settings profile
- More details: https://clickhouse.tech/docs/en/operations/settings/settings-profiles/
- Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationprofiles
- # nullable: true
- x-kubernetes-preserve-unknown-fields: true
- quotas:
- type: object
- description: |
- allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
- you can configure any aspect of resource quotas
- More details: https://clickhouse.tech/docs/en/operations/quotas/
- Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationquotas
- # nullable: true
- x-kubernetes-preserve-unknown-fields: true
- settings:
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host:
type: object
description: |
- allows configure `clickhouse-server` settings inside ... tag in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
- More details: https://clickhouse.tech/docs/en/operations/settings/settings/
- Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationsettings
-
- any key could contains `valueFrom` with `secretKeyRef` which allow pass password from kubernetes secrets
- look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
-
- secret value will pass in `pod.spec.env`, and generate with from_env=XXX in XML in /etc/clickhouse-server/config.d/chop-generated-settings.xml
- it not allow automatically updates when updates `secret`, change spec.taskID for manually trigger reconcile cycle
- # nullable: true
- x-kubernetes-preserve-unknown-fields: true
- files:
- type: object
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectively before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ type: string
+ description: |
+ Whether the operator during reconcile procedure
+ should wait for a ClickHouse host
+ to be excluded from a ClickHouse cluster
+ before moving forward
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ queries:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait for during host launch procedure"
+ properties:
+ startup:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is not to wait.
+ readiness:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: |
+ Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ reconcile:
+ type: object
+ # nullable: true
+ properties:
+ policy:
+ type: string
description: |
- allows define content of any setting file inside each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
- every key in this object is the file name
- every value in this object is the file content
- you can use `!!binary |` and base64 for binary files, see details here https://yaml.org/type/binary.html
- each key could contains prefix like {common}, {users}, {hosts} or config.d, users.d, conf.d, wrong prefixes will be ignored, subfolders also will be ignored
- More details: https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-05-files-nested.yaml
-
- any key could contains `valueFrom` with `secretKeyRef` which allow pass values from kubernetes secrets
- secrets will mounted into pod as separate volume in /etc/clickhouse-server/secrets.d/
- and will automatically update when update secret
- it useful for pass SSL certificates from cert-manager or similar tool
- look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
- # nullable: true
- x-kubernetes-preserve-unknown-fields: true
- clusters:
- type: array
+ DISCUSSED TO BE DEPRECATED
+ Syntax sugar
+ Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config
+ Possible values:
+ - wait - should wait to exclude host, complete queries and include host back into the cluster
+ - nowait - should NOT wait to exclude host, complete queries and include host back into the cluster
+ enum:
+ - ""
+ - "wait"
+ - "nowait"
+ configMapPropagationTimeout:
+ type: integer
description: |
- describes clusters layout and allows change settings on cluster-level, shard-level and replica-level
- every cluster is a set of StatefulSet, one StatefulSet contains only one Pod with `clickhouse-server`
- all Pods will rendered in part of ClickHouse configs, mounted from ConfigMap as `/etc/clickhouse-server/config.d/chop-generated-remote_servers.xml`
- Clusters will use for Distributed table engine, more details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
- If `cluster` contains zookeeper settings (could be inherited from top `chi` level), when you can create *ReplicatedMergeTree tables
- # nullable: true
- items:
- type: object
- #required:
- # - name
+ Timeout in seconds for `clickhouse-operator` to wait for modified `ConfigMap` to propagate into the `Pod`
+ More details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically
+ minimum: 0
+ maximum: 3600
+ cleanup:
+ type: object
+ description: "Optional, defines behavior for cleanup Kubernetes resources during reconcile cycle"
+ # nullable: true
+ properties:
+ unknownObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with found Kubernetes resources which should be managed by clickhouse-operator,
+ but do not have `ownerReference` to any currently managed `ClickHouseInstallation` resource.
+ Default behavior is `Delete`
+ # nullable: true
+ properties:
+ statefulSet:
+ type: string
+ description: "Behavior policy for unknown StatefulSet, `Delete` by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ pvc:
+ type: string
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ description: "Behavior policy for unknown PVC, `Delete` by default"
+ configMap:
+ type: string
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ description: "Behavior policy for unknown ConfigMap, `Delete` by default"
+ service:
+ type: string
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ description: "Behavior policy for unknown Service, `Delete` by default"
+ reconcileFailedObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with Kubernetes resources which are failed during reconcile.
+ Default behavior is `Retain`
+ # nullable: true
+ properties:
+ statefulSet:
+ type: string
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ description: "Behavior policy for failed StatefulSet, `Retain` by default"
+ pvc:
+ type: string
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ description: "Behavior policy for failed PVC, `Retain` by default"
+ configMap:
+ type: string
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ description: "Behavior policy for failed ConfigMap, `Retain` by default"
+ service:
+ type: string
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ description: "Behavior policy for failed Service, `Retain` by default"
+ macros:
+ type: object
+ description: "macros parameters"
+ properties:
+ sections:
+ type: object
+ description: "sections behaviour for macros"
+ properties:
+ users:
+ type: object
+ description: "sections behaviour for macros on users"
+ properties:
+ enabled:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "enabled or not"
+ profiles:
+ type: object
+ description: "sections behaviour for macros on profiles"
+ properties:
+ enabled:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "enabled or not"
+ quotas:
+ type: object
+ description: "sections behaviour for macros on quotas"
+ properties:
+ enabled:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "enabled or not"
+ settings:
+ type: object
+ description: "sections behaviour for macros on settings"
+ properties:
+ enabled:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "enabled or not"
+ files:
+ type: object
+ description: "sections behaviour for macros on files"
+ properties:
+ enabled:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "enabled or not"
+ runtime:
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host:
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectively before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ type: string
+ description: |
+ Whether the operator during reconcile procedure
+ should wait for a ClickHouse host
+ to be excluded from a ClickHouse cluster
+ before moving forward
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ queries:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait during host launch procedure"
+ properties:
+ startup:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+                          Default option value is to not wait.
+ readiness:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: |
+ Whether the operator during host launch procedure should wait for ready probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ defaults:
+ type: object
+ description: |
+ define default behavior for whole ClickHouseInstallation, some behavior can be re-define on cluster, shard and replica level
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults
+ # nullable: true
+ properties:
+ replicasUseFQDN:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: |
+ define should replicas be specified by FQDN in ``.
+ In case of "no" will use short hostname and clickhouse-server will use kubernetes default suffixes for DNS lookup
+ "no" by default
+ distributedDDL:
+ type: object
+ description: |
+ allows change `` settings
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl
+ # nullable: true
+ properties:
+ profile:
+ type: string
+ description: "Settings from this profile will be used to execute DDL queries"
+ storageManagement:
+ type: object
+ description: default storage management options
+ properties:
+ provisioner:
+ type: string
+ description: "defines `PVC` provisioner - be it StatefulSet or the Operator"
+ enum:
+ - ""
+ - "StatefulSet"
+ - "Operator"
+ reclaimPolicy:
+ type: string
+ description: |
+ defines behavior of `PVC` deletion.
+ `Delete` by default, if `Retain` specified then `PVC` will be kept when deleting StatefulSet
+ enum:
+ - ""
+ - "Retain"
+ - "Delete"
+ templates:
+ type: object
+ description: "optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource"
+ # nullable: true
+ properties:
+ hostTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`"
+ podTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ dataVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ logVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ serviceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates. used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ serviceTemplates:
+ type: array
+ description: "optional, template names from chi.spec.templates.serviceTemplates. used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ nullable: true
+ items:
+ type: string
+ clusterServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ shardServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`"
+ replicaServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ volumeClaimTemplate:
+ type: string
+ description: "optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ configuration:
+ type: object
+ description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource"
+ # nullable: true
+ properties:
+ zookeeper:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/`
+                `clickhouse-operator` itself doesn't manage Zookeeper, please install Zookeeper separately, see examples at https://github.com/Altinity/clickhouse-operator/tree/master/deploy/zookeeper/
+ currently, zookeeper (or clickhouse-keeper replacement) used for *ReplicatedMergeTree table engines and for `distributed_ddl`
+ More details: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings_zookeeper
+ # nullable: true
+ properties:
+ nodes:
+ type: array
+ description: "describe every available zookeeper cluster node for interaction"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - host
+ properties:
+ host:
+ type: string
+ description: "dns name or ip address for Zookeeper node"
+ port:
+ type: integer
+ description: "TCP port which used to connect to Zookeeper node"
+ minimum: 0
+ maximum: 65535
+ secure:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "if a secure connection to Zookeeper is required"
+ availabilityZone:
+ type: string
+ description: "availability zone for Zookeeper node"
+ session_timeout_ms:
+ type: integer
+ description: "session timeout during connect to Zookeeper"
+ operation_timeout_ms:
+ type: integer
+ description: "one operation timeout during Zookeeper transactions"
+ root:
+ type: string
+ description: "optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)"
+ identity:
+ type: string
+ description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Enables compression in Keeper protocol if set to true"
+ users:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure password hashed, authorization restrictions, database level security row filters etc.
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-users/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationusers
+
+ any key could contains `valueFrom` with `secretKeyRef` which allow pass password from kubernetes secrets
+                  secret value will be passed in `pod.spec.containers.env`, and generated with from_env=XXX in XML in /etc/clickhouse-server/users.d/chop-generated-users.xml
+                  it does not update automatically when the `secret` is updated; change spec.taskID to manually trigger a reconcile cycle
+
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+
+                  any key with prefix `k8s_secret_` shall have a value with the format namespace/secret/key or secret/key
+ in this case value from secret will write directly into XML tag during render *-usersd ConfigMap
+
+                  any key with prefix `k8s_secret_env` shall have a value with the format namespace/secret/key or secret/key
+ in this case value from secret will write into environment variable and write to XML tag via from_env=XXX
+
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ profiles:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of settings profile
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-profiles/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationprofiles
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of resource quotas
+ More details: https://clickhouse.tech/docs/en/operations/quotas/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationquotas
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ settings:
+ type: object
+ description: |
+ allows configure `clickhouse-server` settings inside ... tag in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationsettings
+
+ any key could contains `valueFrom` with `secretKeyRef` which allow pass password from kubernetes secrets
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+
+ secret value will pass in `pod.spec.env`, and generate with from_env=XXX in XML in /etc/clickhouse-server/config.d/chop-generated-settings.xml
+                  it does not update automatically when the `secret` is updated; change spec.taskID to manually trigger a reconcile cycle
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ files:
+ type: object
+ description: |
+ allows define content of any setting file inside each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ every key in this object is the file name
+ every value in this object is the file content
+ you can use `!!binary |` and base64 for binary files, see details here https://yaml.org/type/binary.html
+ each key could contains prefix like {common}, {users}, {hosts} or config.d, users.d, conf.d, wrong prefixes will be ignored, subfolders also will be ignored
+ More details: https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-05-files-nested.yaml
+
+ any key could contains `valueFrom` with `secretKeyRef` which allow pass values from kubernetes secrets
+                  secrets will be mounted into the pod as a separate volume in /etc/clickhouse-server/secrets.d/
+                  and will be updated automatically when the secret is updated
+                  it is useful for passing SSL certificates from cert-manager or similar tools
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ clusters:
+ type: array
+ description: |
+ describes clusters layout and allows change settings on cluster-level, shard-level and replica-level
+ every cluster is a set of StatefulSet, one StatefulSet contains only one Pod with `clickhouse-server`
+ all Pods will rendered in part of ClickHouse configs, mounted from ConfigMap as `/etc/clickhouse-server/config.d/chop-generated-remote_servers.xml`
+ Clusters will use for Distributed table engine, more details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ If `cluster` contains zookeeper settings (could be inherited from top `chi` level), when you can create *ReplicatedMergeTree tables
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
properties:
name:
type: string
@@ -1036,6 +1813,34 @@ spec:
identity:
type: string
description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Enables compression in Keeper protocol if set to true"
description: |
optional, allows configure .. section in each `Pod` only in current ClickHouse cluster, during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/`
override top-level `chi.spec.configuration.zookeeper` settings
@@ -1285,12 +2090,241 @@ spec:
type: integer
minimum: 1
maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
reconcileShardsMaxConcurrencyPercent:
type: integer
minimum: 0
maximum: 100
description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host:
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+                      respectively before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ type: string
+                            description: |
+                              Whether the operator during reconcile procedure should wait
+                              for a ClickHouse host to be excluded from a ClickHouse cluster
+                              before moving forward.
+                              Complements the sibling `queries` and `include` wait options.
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ queries:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait during host launch procedure"
+ properties:
+ startup:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+                                Default option value is to not wait.
+ readiness:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: |
+ Whether the operator during host launch procedure should wait for ready probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
layout:
type: object
description: |
@@ -1858,7 +2892,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -2215,7 +3249,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
@@ -2230,14 +3264,14 @@ spec:
# SINGULAR=clickhouseinstallationtemplate
# PLURAL=clickhouseinstallationtemplates
# SHORT=chit
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseinstallationtemplates.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -2631,12 +3665,531 @@ spec:
- "manual"
chiSelector:
type: object
- description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
- # nullable: true
- x-kubernetes-preserve-unknown-fields: true
- reconciling:
+ description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ reconciling:
+ type: object
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ # nullable: true
+ properties:
+ policy:
+ type: string
+ description: |
+ DISCUSSED TO BE DEPRECATED
+ Syntax sugar
+ Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config
+ Possible values:
+ - wait - should wait to exclude host, complete queries and include host back into the cluster
+ - nowait - should NOT wait to exclude host, complete queries and include host back into the cluster
+ enum:
+ - ""
+ - "wait"
+ - "nowait"
+ configMapPropagationTimeout:
+ type: integer
+ description: |
+ Timeout in seconds for `clickhouse-operator` to wait for modified `ConfigMap` to propagate into the `Pod`
+ More details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically
+ minimum: 0
+ maximum: 3600
+ cleanup:
+ type: object
+ description: "Optional, defines behavior for cleanup Kubernetes resources during reconcile cycle"
+ # nullable: true
+ properties:
+ unknownObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with found Kubernetes resources which should be managed by clickhouse-operator,
+ but do not have `ownerReference` to any currently managed `ClickHouseInstallation` resource.
+                    Default behavior is `Delete`
+ # nullable: true
+ properties:
+ statefulSet:
+ type: string
+ description: "Behavior policy for unknown StatefulSet, `Delete` by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ pvc:
+ type: string
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ description: "Behavior policy for unknown PVC, `Delete` by default"
+ configMap:
+ type: string
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ description: "Behavior policy for unknown ConfigMap, `Delete` by default"
+ service:
+ type: string
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ description: "Behavior policy for unknown Service, `Delete` by default"
+ reconcileFailedObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with Kubernetes resources which are failed during reconcile.
+                    Default behavior is `Retain`
+ # nullable: true
+ properties:
+ statefulSet:
+ type: string
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ description: "Behavior policy for failed StatefulSet, `Retain` by default"
+ pvc:
+ type: string
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ description: "Behavior policy for failed PVC, `Retain` by default"
+ configMap:
+ type: string
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ description: "Behavior policy for failed ConfigMap, `Retain` by default"
+ service:
+ type: string
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ description: "Behavior policy for failed Service, `Retain` by default"
+ macros:
+ type: object
+ description: "macros parameters"
+ properties:
+ sections:
+ type: object
+ description: "sections behaviour for macros"
+ properties:
+ users:
+ type: object
+ description: "sections behaviour for macros on users"
+ properties:
+ enabled:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "enabled or not"
+ profiles:
+ type: object
+ description: "sections behaviour for macros on profiles"
+ properties:
+ enabled:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "enabled or not"
+ quotas:
+ type: object
+ description: "sections behaviour for macros on quotas"
+ properties:
+ enabled:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "enabled or not"
+ settings:
+ type: object
+ description: "sections behaviour for macros on settings"
+ properties:
+ enabled:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "enabled or not"
+ files:
+ type: object
+ description: "sections behaviour for macros on files"
+ properties:
+ enabled:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "enabled or not"
+ runtime:
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host:
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+                      respectively before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ type: string
+                            description: |
+                              Whether the operator during reconcile procedure should wait
+                              for a ClickHouse host to be excluded from a ClickHouse cluster
+                              before moving forward.
+                              Complements the sibling `queries` and `include` wait options.
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ queries:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+                    description: "What probes the operator should wait for during host launch procedure"
+ properties:
+ startup:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+                          Default option value is not to wait.
+ readiness:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: |
+ Whether the operator during host launch procedure should wait for ready probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ reconcile:
type: object
- description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
# nullable: true
properties:
policy:
@@ -2743,20 +4296,6 @@ spec:
- "Retain"
- "Delete"
description: "Behavior policy for failed Service, `Retain` by default"
- runtime:
- type: object
- description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
- properties:
- reconcileShardsThreadsNumber:
- type: integer
- minimum: 1
- maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
- reconcileShardsMaxConcurrencyPercent:
- type: integer
- minimum: 0
- maximum: 100
- description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
macros:
type: object
description: "macros parameters"
@@ -2860,12 +4399,253 @@ spec:
- "disabled"
- "Enabled"
- "enabled"
- description: "enabled or not"
- settings:
+ description: "enabled or not"
+ settings:
+ type: object
+ description: "sections behaviour for macros on settings"
+ properties:
+ enabled:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "enabled or not"
+ files:
+ type: object
+ description: "sections behaviour for macros on files"
+ properties:
+ enabled:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "enabled or not"
+ runtime:
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host:
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+                  respectively before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ type: string
+ description: |
+                              Whether the operator during reconcile procedure
+                              should wait for a ClickHouse host to be excluded
+                              from a ClickHouse cluster before moving forward
+                              with the reconcile cycle.
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ queries:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
type: object
- description: "sections behaviour for macros on settings"
+                      description: "What probes the operator should wait for during host launch procedure"
properties:
- enabled:
+ startup:
type: string
enum:
# List StringBoolXXX constants from model
@@ -2892,12 +4672,11 @@ spec:
- "disabled"
- "Enabled"
- "enabled"
- description: "enabled or not"
- files:
- type: object
- description: "sections behaviour for macros on files"
- properties:
- enabled:
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+                            Default option value is not to wait.
+ readiness:
type: string
enum:
# List StringBoolXXX constants from model
@@ -2924,7 +4703,11 @@ spec:
- "disabled"
- "Enabled"
- "enabled"
- description: "enabled or not"
+ description: |
+ Whether the operator during host launch procedure should wait for ready probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
defaults:
type: object
description: |
@@ -3105,6 +4888,34 @@ spec:
identity:
type: string
description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Enables compression in Keeper protocol if set to true"
users:
type: object
description: |
@@ -3262,6 +5073,34 @@ spec:
identity:
type: string
description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Enables compression in Keeper protocol if set to true"
description: |
optional, allows configure .. section in each `Pod` only in current ClickHouse cluster, during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/`
override top-level `chi.spec.configuration.zookeeper` settings
@@ -3511,12 +5350,241 @@ spec:
type: integer
minimum: 1
maximum: 65535
- description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
reconcileShardsMaxConcurrencyPercent:
type: integer
minimum: 0
maximum: 100
description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host:
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+              respectively before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ type: string
+ description: |
+                    Whether the operator during reconcile procedure
+                    should wait for a ClickHouse host to be excluded
+                    from a ClickHouse cluster before moving forward
+                    with the reconcile cycle.
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ queries:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+                        description: "What probes the operator should wait for during host launch procedure"
+ properties:
+ startup:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+                        Default option value is not to wait.
+ readiness:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: |
+ Whether the operator during host launch procedure should wait for ready probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
layout:
type: object
description: |
@@ -4084,7 +6152,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -4441,7 +6509,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
@@ -4459,7 +6527,7 @@ kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
- clickhouse.altinity.com/chop: 0.25.3
+ clickhouse.altinity.com/chop: 0.25.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
@@ -4505,10 +6573,9 @@ spec:
description: "Parameters for watch kubernetes resources which used by clickhouse-operator deployment"
properties:
namespaces:
- type: array
+ type: object
description: "List of namespaces where clickhouse-operator watches for events."
- items:
- type: string
+ x-kubernetes-preserve-unknown-fields: true
clickhouse:
type: object
description: "Clickhouse related parameters used by clickhouse-operator"
@@ -4930,6 +6997,72 @@ spec:
delay:
type: integer
description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+                description: "What probes the operator should wait for during host launch procedure"
+ properties:
+ startup:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+                    Default option value is not to wait.
+ readiness:
+ type: string
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ description: |
+ Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
annotation:
type: object
description: "defines which metadata.annotations items will include or exclude during render StatefulSet, Pod, PVC resources"
@@ -5182,14 +7315,14 @@ spec:
---
# Template Parameters:
#
-# OPERATOR_VERSION=0.25.3
+# OPERATOR_VERSION=0.25.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
- clickhouse-keeper.altinity.com/chop: 0.25.3
+ clickhouse-keeper.altinity.com/chop: 0.25.4
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
@@ -6083,7 +8216,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
diff --git a/deploy/operatorhub/0.18.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.18.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index 4ba236761..9288a81ad 100644
--- a/deploy/operatorhub/0.18.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.18.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -1051,7 +1051,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1308,7 +1308,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.18.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.18.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index bdffd61aa..b4a61e350 100644
--- a/deploy/operatorhub/0.18.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.18.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -1051,7 +1051,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1308,7 +1308,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.18.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.18.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index 07293a392..c838a9af5 100644
--- a/deploy/operatorhub/0.18.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.18.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -1056,7 +1056,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1313,7 +1313,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.18.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.18.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index 17b4a76d6..b416fa18e 100644
--- a/deploy/operatorhub/0.18.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.18.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -1056,7 +1056,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1313,7 +1313,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.18.4/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.18.4/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index d829ec791..6bf8714d4 100644
--- a/deploy/operatorhub/0.18.4/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.18.4/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -1059,7 +1059,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1316,7 +1316,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.18.4/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.18.4/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index f00a2495b..c6ba38d59 100644
--- a/deploy/operatorhub/0.18.4/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.18.4/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -1059,7 +1059,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1316,7 +1316,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.18.5/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.18.5/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index dcaa29466..0ae1554a6 100644
--- a/deploy/operatorhub/0.18.5/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.18.5/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -1059,7 +1059,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1316,7 +1316,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.18.5/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.18.5/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index 68cdb6b04..1b5ee79db 100644
--- a/deploy/operatorhub/0.18.5/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.18.5/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -1059,7 +1059,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1316,7 +1316,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.19.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.19.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index 59e6b8aba..554dc343d 100644
--- a/deploy/operatorhub/0.19.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.19.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -1087,7 +1087,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1344,7 +1344,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.19.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.19.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index 8f222c23f..8a5d55e0c 100644
--- a/deploy/operatorhub/0.19.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.19.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -1087,7 +1087,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1344,7 +1344,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.19.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.19.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index e2e707b3d..e15ed5650 100644
--- a/deploy/operatorhub/0.19.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.19.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -1087,7 +1087,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1344,7 +1344,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.19.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.19.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index 3892696b6..4e6dc0a75 100644
--- a/deploy/operatorhub/0.19.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.19.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -1087,7 +1087,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1344,7 +1344,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.19.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.19.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index ec1789cd0..59aea4a5b 100644
--- a/deploy/operatorhub/0.19.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.19.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -1087,7 +1087,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1344,7 +1344,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.19.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.19.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index 7195cfa02..26556ef6c 100644
--- a/deploy/operatorhub/0.19.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.19.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -1087,7 +1087,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1344,7 +1344,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.19.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.19.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index 1f98f08da..ad1e52461 100644
--- a/deploy/operatorhub/0.19.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.19.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -1091,7 +1091,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1348,7 +1348,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.19.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.19.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index a4157bca9..3c6a15e35 100644
--- a/deploy/operatorhub/0.19.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.19.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -1091,7 +1091,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1348,7 +1348,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.20.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.20.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index 242331f82..c49be6c5f 100644
--- a/deploy/operatorhub/0.20.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.20.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -856,7 +856,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1091,7 +1091,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.20.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.20.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index df819e90b..adf1234c8 100644
--- a/deploy/operatorhub/0.20.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.20.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -856,7 +856,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1091,7 +1091,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.20.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.20.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index 073256ded..3b4ea96c3 100644
--- a/deploy/operatorhub/0.20.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.20.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -865,7 +865,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1100,7 +1100,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.20.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.20.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index f45b2db71..e4a3fbc9e 100644
--- a/deploy/operatorhub/0.20.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.20.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -865,7 +865,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1100,7 +1100,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.20.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.20.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index 22d878b17..1494196a8 100644
--- a/deploy/operatorhub/0.20.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.20.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -868,7 +868,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1103,7 +1103,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.20.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.20.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index 5aa019a9e..b8963044b 100644
--- a/deploy/operatorhub/0.20.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.20.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -868,7 +868,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1103,7 +1103,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.20.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.20.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index bd947f758..371c17ac9 100644
--- a/deploy/operatorhub/0.20.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.20.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -868,7 +868,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1103,7 +1103,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.20.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.20.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index 8b1f146e6..2f79a3d99 100644
--- a/deploy/operatorhub/0.20.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.20.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -868,7 +868,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1103,7 +1103,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.21.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.21.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index c54dc3495..35dc366fd 100644
--- a/deploy/operatorhub/0.21.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.21.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -897,7 +897,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1143,7 +1143,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.21.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.21.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index c8a418f2e..bd9b25e39 100644
--- a/deploy/operatorhub/0.21.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.21.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -897,7 +897,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1143,7 +1143,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.21.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.21.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index 4b0c0583e..e15ed18a4 100644
--- a/deploy/operatorhub/0.21.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.21.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -897,7 +897,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1143,7 +1143,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.21.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.21.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index 05ca09df1..59e193ebf 100644
--- a/deploy/operatorhub/0.21.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.21.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -897,7 +897,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1143,7 +1143,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.21.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.21.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index e0874c1fb..9e1612ef5 100644
--- a/deploy/operatorhub/0.21.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.21.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -897,7 +897,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1143,7 +1143,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.21.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.21.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index d29ae7061..d27ee58d9 100644
--- a/deploy/operatorhub/0.21.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.21.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -897,7 +897,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1143,7 +1143,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.21.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.21.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index d2a286eda..e44670075 100644
--- a/deploy/operatorhub/0.21.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.21.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -910,7 +910,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1156,7 +1156,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.21.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.21.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index 5327df3d7..b2fa70c93 100644
--- a/deploy/operatorhub/0.21.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.21.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -910,7 +910,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1156,7 +1156,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.22.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.22.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index c4fa3f332..0cfc820bb 100644
--- a/deploy/operatorhub/0.22.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.22.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -924,7 +924,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1170,7 +1170,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.22.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.22.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index dcc88563d..7ae66eaff 100644
--- a/deploy/operatorhub/0.22.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.22.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -924,7 +924,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1170,7 +1170,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.22.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.22.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index 0ec6a96fc..98d0fbcb9 100644
--- a/deploy/operatorhub/0.22.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.22.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -937,7 +937,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1183,7 +1183,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.22.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.22.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index c59af4a20..d146d2697 100644
--- a/deploy/operatorhub/0.22.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.22.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -937,7 +937,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1183,7 +1183,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.22.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.22.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index 72c7f07d7..7904c28a3 100644
--- a/deploy/operatorhub/0.22.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.22.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -937,7 +937,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1183,7 +1183,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.22.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.22.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index 27aab81e3..a73f62de5 100644
--- a/deploy/operatorhub/0.22.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.22.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -937,7 +937,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1183,7 +1183,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.23.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.23.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index 2f075ad20..13d56cf76 100644
--- a/deploy/operatorhub/0.23.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.23.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -954,7 +954,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1200,7 +1200,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.23.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.23.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index 8b364855a..dbfa1f60f 100644
--- a/deploy/operatorhub/0.23.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.23.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -954,7 +954,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1200,7 +1200,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.23.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.23.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index 3503c01b2..dc0b64b15 100644
--- a/deploy/operatorhub/0.23.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.23.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -954,7 +954,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1200,7 +1200,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.23.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.23.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index 78260fbdd..cd99e453a 100644
--- a/deploy/operatorhub/0.23.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.23.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -954,7 +954,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1200,7 +1200,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.23.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.23.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index 9286b5951..0b750107a 100644
--- a/deploy/operatorhub/0.23.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.23.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -954,7 +954,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1200,7 +1200,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.23.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.23.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index 88f050d10..e1a772d5c 100644
--- a/deploy/operatorhub/0.23.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.23.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -954,7 +954,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1200,7 +1200,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.23.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.23.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index 082f421f3..5f379a925 100644
--- a/deploy/operatorhub/0.23.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.23.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -963,7 +963,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1209,7 +1209,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.23.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.23.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index 964be1b6a..947d2a00c 100644
--- a/deploy/operatorhub/0.23.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.23.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -963,7 +963,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1209,7 +1209,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.23.4/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.23.4/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index d97ee2c0e..b8c975fca 100644
--- a/deploy/operatorhub/0.23.4/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.23.4/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -963,7 +963,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1209,7 +1209,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.23.4/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.23.4/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index d854c33f3..e0a0b8b66 100644
--- a/deploy/operatorhub/0.23.4/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.23.4/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -963,7 +963,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1209,7 +1209,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.23.5/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.23.5/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index 1ab6e6a00..4d608ebe1 100644
--- a/deploy/operatorhub/0.23.5/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.23.5/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -963,7 +963,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1209,7 +1209,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.23.5/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.23.5/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index 372998a59..a6ca83a1e 100644
--- a/deploy/operatorhub/0.23.5/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.23.5/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -963,7 +963,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1209,7 +1209,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.23.6/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.23.6/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index ba8c45e41..7eb06c8b8 100644
--- a/deploy/operatorhub/0.23.6/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.23.6/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -963,7 +963,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1209,7 +1209,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.23.6/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.23.6/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index 9c19bcb51..47d8cd80e 100644
--- a/deploy/operatorhub/0.23.6/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.23.6/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -963,7 +963,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1209,7 +1209,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.23.7/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.23.7/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index 2d50369b5..47b4167a5 100644
--- a/deploy/operatorhub/0.23.7/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.23.7/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -963,7 +963,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1209,7 +1209,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.23.7/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.23.7/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index ec3dccad6..70db54978 100644
--- a/deploy/operatorhub/0.23.7/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.23.7/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -963,7 +963,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1209,7 +1209,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.24.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.24.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index 9dd812ecd..54f023cc5 100644
--- a/deploy/operatorhub/0.24.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.24.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -983,7 +983,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1237,7 +1237,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.24.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.24.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index ad3839715..357722025 100644
--- a/deploy/operatorhub/0.24.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.24.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -983,7 +983,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1237,7 +1237,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.24.0/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml b/deploy/operatorhub/0.24.0/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
index 0f003fe28..e2612f5af 100644
--- a/deploy/operatorhub/0.24.0/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.24.0/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
@@ -628,7 +628,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
diff --git a/deploy/operatorhub/0.24.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.24.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index 2672838ce..8efddde2c 100644
--- a/deploy/operatorhub/0.24.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.24.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -1010,7 +1010,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1264,7 +1264,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.24.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.24.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index 96eeb6a15..de068a6de 100644
--- a/deploy/operatorhub/0.24.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.24.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -1010,7 +1010,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1264,7 +1264,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.24.1/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml b/deploy/operatorhub/0.24.1/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
index 1662c4051..f8b4e30d6 100644
--- a/deploy/operatorhub/0.24.1/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.24.1/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
@@ -628,7 +628,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
diff --git a/deploy/operatorhub/0.24.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.24.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index 1b4dc5768..df5f75f87 100644
--- a/deploy/operatorhub/0.24.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.24.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -1010,7 +1010,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1264,7 +1264,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.24.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.24.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index f39161d45..410f33c1e 100644
--- a/deploy/operatorhub/0.24.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.24.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -1010,7 +1010,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1264,7 +1264,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.24.2/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml b/deploy/operatorhub/0.24.2/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
index 9bee334ca..fca208895 100644
--- a/deploy/operatorhub/0.24.2/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.24.2/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
@@ -628,7 +628,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
diff --git a/deploy/operatorhub/0.24.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.24.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index 33bd03d2b..a82774d2a 100644
--- a/deploy/operatorhub/0.24.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.24.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -1012,7 +1012,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1266,7 +1266,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.24.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.24.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index 9ad0a1b4a..e7d05abf6 100644
--- a/deploy/operatorhub/0.24.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.24.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -1012,7 +1012,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1266,7 +1266,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.24.3/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml b/deploy/operatorhub/0.24.3/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
index f8c3d78b4..6a701bea3 100644
--- a/deploy/operatorhub/0.24.3/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.24.3/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
@@ -640,7 +640,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
diff --git a/deploy/operatorhub/0.24.4/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.24.4/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index fce1877b3..e26385039 100644
--- a/deploy/operatorhub/0.24.4/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.24.4/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -1012,7 +1012,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1266,7 +1266,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.24.4/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.24.4/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index ef4b42d1e..3a21e77ad 100644
--- a/deploy/operatorhub/0.24.4/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.24.4/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -1012,7 +1012,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1266,7 +1266,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.24.4/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml b/deploy/operatorhub/0.24.4/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
index 360770d7e..1ca3ce7ef 100644
--- a/deploy/operatorhub/0.24.4/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.24.4/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
@@ -640,7 +640,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
diff --git a/deploy/operatorhub/0.24.5/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.24.5/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index 5d15f15e3..fd7549d50 100644
--- a/deploy/operatorhub/0.24.5/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.24.5/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -1012,7 +1012,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1266,7 +1266,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.24.5/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.24.5/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index 665b15920..c946011c0 100644
--- a/deploy/operatorhub/0.24.5/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.24.5/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -1012,7 +1012,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1266,7 +1266,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.24.5/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml b/deploy/operatorhub/0.24.5/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
index d82e124eb..31941e3f6 100644
--- a/deploy/operatorhub/0.24.5/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.24.5/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
@@ -640,7 +640,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
diff --git a/deploy/operatorhub/0.25.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.25.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index e921e0d4a..51bd378ff 100644
--- a/deploy/operatorhub/0.25.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.25.0/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -1065,7 +1065,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1319,7 +1319,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.25.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.25.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index 5d8973998..cd230a917 100644
--- a/deploy/operatorhub/0.25.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.25.0/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -1065,7 +1065,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1319,7 +1319,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.25.0/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml b/deploy/operatorhub/0.25.0/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
index 656f153ef..c359ebcf5 100644
--- a/deploy/operatorhub/0.25.0/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.25.0/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
@@ -658,7 +658,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
diff --git a/deploy/operatorhub/0.25.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.25.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index fb38fee60..ee39517b1 100644
--- a/deploy/operatorhub/0.25.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.25.1/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -1108,7 +1108,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1362,7 +1362,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.25.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.25.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index 5ccb8f862..23afb710c 100644
--- a/deploy/operatorhub/0.25.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.25.1/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -1108,7 +1108,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1362,7 +1362,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.25.1/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml b/deploy/operatorhub/0.25.1/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
index 558aa204f..d5eb0a20a 100644
--- a/deploy/operatorhub/0.25.1/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.25.1/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
@@ -658,7 +658,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
diff --git a/deploy/operatorhub/0.25.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.25.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index 84359a4b6..ca455a48a 100644
--- a/deploy/operatorhub/0.25.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.25.2/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -1108,7 +1108,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1362,7 +1362,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.25.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.25.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index 0d7dab1a3..412be36e9 100644
--- a/deploy/operatorhub/0.25.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.25.2/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -1108,7 +1108,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1362,7 +1362,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.25.2/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml b/deploy/operatorhub/0.25.2/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
index 4581ff39b..bda1ac81f 100644
--- a/deploy/operatorhub/0.25.2/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.25.2/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
@@ -658,7 +658,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
diff --git a/deploy/operatorhub/0.25.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.25.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
index 24f7f4a31..b90f3dbd9 100644
--- a/deploy/operatorhub/0.25.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.25.3/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -1117,7 +1117,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1371,7 +1371,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.25.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.25.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
index 51ae50227..29b0be1a4 100644
--- a/deploy/operatorhub/0.25.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.25.3/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -1117,7 +1117,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
@@ -1371,7 +1371,7 @@ spec:
description: "name of `ClickHouseInstallationTemplate` (chit) resource"
namespace:
type: string
- description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clichouse-operator`"
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
useType:
type: string
description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
diff --git a/deploy/operatorhub/0.25.3/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml b/deploy/operatorhub/0.25.3/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
index ae70976fd..75a0a8b43 100644
--- a/deploy/operatorhub/0.25.3/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
+++ b/deploy/operatorhub/0.25.3/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
@@ -675,7 +675,7 @@ spec:
properties:
name:
type: string
- description: "by default, hostname will generate, but this allows define custom name for each `clickhuse-server`"
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
minLength: 1
# See namePartReplicaMaxLen const
maxLength: 15
diff --git a/deploy/operatorhub/0.25.4/clickhouse-operator.v0.25.4.clusterserviceversion.yaml b/deploy/operatorhub/0.25.4/clickhouse-operator.v0.25.4.clusterserviceversion.yaml
new file mode 100644
index 000000000..c0f51c10c
--- /dev/null
+++ b/deploy/operatorhub/0.25.4/clickhouse-operator.v0.25.4.clusterserviceversion.yaml
@@ -0,0 +1,1660 @@
+apiVersion: operators.coreos.com/v1alpha1
+kind: ClusterServiceVersion
+metadata:
+ name: clickhouse-operator.v0.25.4
+ namespace: placeholder
+ annotations:
+ capabilities: Full Lifecycle
+ categories: Database
+ containerImage: docker.io/altinity/clickhouse-operator:0.25.4
+ createdAt: '2025-09-26T16:35:39Z'
+ support: Altinity Ltd. https://altinity.com
+ description: The Altinity® Kubernetes Operator for ClickHouse® manages the full lifecycle of ClickHouse clusters.
+ repository: https://github.com/altinity/clickhouse-operator
+ certified: 'false'
+ alm-examples: |
+ [
+ {
+ "apiVersion": "clickhouse.altinity.com/v1",
+ "kind": "ClickHouseInstallation",
+ "metadata": {
+ "name": "simple-01"
+ },
+ "spec": {
+ "configuration": {
+ "users": {
+ "test_user/password_sha256_hex": "10a6e6cc8311a3e2bcc09bf6c199adecd5dd59408c343e926b129c4914f3cb01",
+ "test_user/password": "test_password",
+ "test_user/networks/ip": [
+ "0.0.0.0/0"
+ ]
+ },
+ "clusters": [
+ {
+ "name": "simple"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "apiVersion": "clickhouse.altinity.com/v1",
+ "kind": "ClickHouseInstallation",
+ "metadata": {
+ "name": "use-templates-all",
+ "labels": {
+ "target-chi-label-manual": "target-chi-label-manual-value",
+ "target-chi-label-auto": "target-chi-label-auto-value"
+ }
+ },
+ "spec": {
+ "useTemplates": [
+ {
+ "name": "chit-01"
+ },
+ {
+ "name": "chit-02"
+ }
+ ],
+ "configuration": {
+ "clusters": [
+ {
+ "name": "c1"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "apiVersion": "clickhouse.altinity.com/v1",
+ "kind": "ClickHouseOperatorConfiguration",
+ "metadata": {
+ "name": "chop-config-01"
+ },
+ "spec": {
+ "watch": {
+ "namespaces": {
+ "include": [],
+ "exclude": []
+ }
+ },
+ "clickhouse": {
+ "configuration": {
+ "file": {
+ "path": {
+ "common": "config.d",
+ "host": "conf.d",
+ "user": "users.d"
+ }
+ },
+ "user": {
+ "default": {
+ "profile": "default",
+ "quota": "default",
+ "networksIP": [
+ "::1",
+ "127.0.0.1"
+ ],
+ "password": "default"
+ }
+ },
+ "network": {
+ "hostRegexpTemplate": "(chi-{chi}-[^.]+\\d+-\\d+|clickhouse\\-{chi})\\.{namespace}\\.svc\\.cluster\\.local$"
+ }
+ },
+ "access": {
+ "username": "clickhouse_operator",
+ "password": "clickhouse_operator_password",
+ "secret": {
+ "namespace": "",
+ "name": ""
+ },
+ "port": 8123
+ }
+ },
+ "template": {
+ "chi": {
+ "path": "templates.d"
+ }
+ },
+ "reconcile": {
+ "runtime": {
+ "reconcileCHIsThreadsNumber": 10,
+ "reconcileShardsThreadsNumber": 5,
+ "reconcileShardsMaxConcurrencyPercent": 50
+ },
+ "statefulSet": {
+ "create": {
+ "onFailure": "ignore"
+ },
+ "update": {
+ "timeout": 300,
+ "pollInterval": 5,
+ "onFailure": "abort"
+ }
+ },
+ "host": {
+ "wait": {
+ "exclude": true,
+ "queries": true,
+ "include": false,
+ "replicas": {
+ "all": "no",
+ "new": "yes",
+ "delay": 10
+ },
+ "probes": {
+ "startup": "no",
+ "readiness": "yes"
+ }
+ }
+ }
+ },
+ "annotation": {
+ "include": [],
+ "exclude": []
+ },
+ "label": {
+ "include": [],
+ "exclude": [],
+ "appendScope": "no"
+ },
+ "statefulSet": {
+ "revisionHistoryLimit": 0
+ },
+ "pod": {
+ "terminationGracePeriod": 30
+ },
+ "logger": {
+ "logtostderr": "true",
+ "alsologtostderr": "false",
+ "v": "1",
+ "stderrthreshold": "",
+ "vmodule": "",
+ "log_backtrace_at": ""
+ }
+ }
+ }
+ ]
+spec:
+ version: 0.25.4
+ minKubeVersion: 1.12.6
+ maturity: alpha
+ replaces: clickhouse-operator.v0.25.3
+ maintainers:
+ - email: support@altinity.com
+ name: Altinity
+ provider:
+ name: Altinity
+ displayName: Altinity® Kubernetes Operator for ClickHouse®
+ keywords:
+ - "clickhouse"
+ - "database"
+ - "oltp"
+ - "timeseries"
+ - "time series"
+ - "altinity"
+ customresourcedefinitions:
+ owned:
+ - description: ClickHouse Installation - set of ClickHouse Clusters
+ displayName: ClickHouseInstallation
+ group: clickhouse.altinity.com
+ kind: ClickHouseInstallation
+ name: clickhouseinstallations.clickhouse.altinity.com
+ version: v1
+ resources:
+ - kind: Service
+ name: ''
+ version: v1
+ - kind: Endpoint
+ name: ''
+ version: v1
+ - kind: Pod
+ name: ''
+ version: v1
+ - kind: StatefulSet
+ name: ''
+ version: v1
+ - kind: ConfigMap
+ name: ''
+ version: v1
+ - kind: Event
+ name: ''
+ version: v1
+ - kind: PersistentVolumeClaim
+ name: ''
+ version: v1
+ - description: ClickHouse Installation Template - template for ClickHouse Installation
+ displayName: ClickHouseInstallationTemplate
+ group: clickhouse.altinity.com
+ kind: ClickHouseInstallationTemplate
+ name: clickhouseinstallationtemplates.clickhouse.altinity.com
+ version: v1
+ resources:
+ - kind: Service
+ name: ''
+ version: v1
+ - kind: Endpoint
+ name: ''
+ version: v1
+ - kind: Pod
+ name: ''
+ version: v1
+ - kind: StatefulSet
+ name: ''
+ version: v1
+ - kind: ConfigMap
+ name: ''
+ version: v1
+ - kind: Event
+ name: ''
+ version: v1
+ - kind: PersistentVolumeClaim
+ name: ''
+ version: v1
+ - description: ClickHouse Operator Configuration - configuration of ClickHouse operator
+ displayName: ClickHouseOperatorConfiguration
+ group: clickhouse.altinity.com
+ kind: ClickHouseOperatorConfiguration
+ name: clickhouseoperatorconfigurations.clickhouse.altinity.com
+ version: v1
+ resources:
+ - kind: Service
+ name: ''
+ version: v1
+ - kind: Endpoint
+ name: ''
+ version: v1
+ - kind: Pod
+ name: ''
+ version: v1
+ - kind: StatefulSet
+ name: ''
+ version: v1
+ - kind: ConfigMap
+ name: ''
+ version: v1
+ - kind: Event
+ name: ''
+ version: v1
+ - kind: PersistentVolumeClaim
+ name: ''
+ version: v1
+ - description: ClickHouse Keeper Installation - ClickHouse Keeper cluster instance
+ displayName: ClickHouseKeeperInstallation
+ group: clickhouse-keeper.altinity.com
+ kind: ClickHouseKeeperInstallation
+ name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
+ version: v1
+ resources:
+ - kind: Service
+ name: ''
+ version: v1
+ - kind: Endpoint
+ name: ''
+ version: v1
+ - kind: Pod
+ name: ''
+ version: v1
+ - kind: StatefulSet
+ name: ''
+ version: v1
+ - kind: ConfigMap
+ name: ''
+ version: v1
+ - kind: Event
+ name: ''
+ version: v1
+ - kind: PersistentVolumeClaim
+ name: ''
+ version: v1
+ description: |-
+ ## ClickHouse
+ [ClickHouse](https://clickhouse.yandex) is an open source column-oriented database management system capable of real time generation of analytical data reports.
+ Check [ClickHouse documentation](https://clickhouse.yandex/docs/en) for more complete details.
+ ## The Altinity Operator for ClickHouse
+ The [Altinity Operator for ClickHouse](https://github.com/altinity/clickhouse-operator) automates the creation, alteration, or deletion of nodes in your ClickHouse cluster environment.
+ Check [operator documentation](https://github.com/Altinity/clickhouse-operator/tree/master/docs) for complete details and examples.
+ links:
+ - name: Altinity
+ url: https://altinity.com/
+ - name: Operator homepage
+ url: https://www.altinity.com/kubernetes-operator
+ - name: Github
+ url: https://github.com/altinity/clickhouse-operator
+ - name: Documentation
+ url: https://github.com/Altinity/clickhouse-operator/tree/master/docs
+ icon:
+ - mediatype: image/png
+ base64data: |-
+ iVBORw0KGgoAAAANSUhEUgAAASwAAAEsCAYAAAB5fY51AAAAAXNSR0IArs4c6QAAQABJREFUeAHs
+ vQmgZ2lVH3j/r6p676abpSNLE2TrRlwSQBoVtVFHiUGFLKOEKCQTTUzGmTExhmhiSJBoTMZEs2KQ
+ BsWNREWdyCTOGOMEQZbI0i0NyCaIxAB203tXvfef33LO+c697/+qqqu3V1Xvvq773fOd3/md5Tvf
+ 9+57/erVajq4DiqwoQLnvf4PH3Pe4ekx29P0oNVqfTEgl6xWE8eLV6uVxgnjCjLHab0TI+ZW00PW
+ 6zWgq0+up/XNkG+edjiubt6Zppu31tOnd1brm7fwvL1e3by1Bf165+b1+vDN02r7Jhh+6MZnXfYh
+ jAfXQQVmFUC/HVxnbQV+aX3Bg875xJOOracrD21NV00705U4XK6aVtMTVqut81WX6BAcNpOOIIye
+ x4iJFMfDhmry8CIwRh5mZBHfppH61XT7znp672pavQeH3g07W1s3rLbX77npQQ+6YXra6rYNXg6m
+ zoIKRPedBZmerSmu16vzfvnGRx9a3XXlar2+CscEDqXVVTgSrtzaWj2SZRmHURRJh0uf9/EycNE2
+ cVpxPg+jLLMOJczPRiiPd0j1Q634eMgtr/X693CIvWe9OnQD+G/goXbXzs4Nt3/FZR8BxwaDJcGB
+ fLpW4ODAOl1Xbq+4cUBd+Mv/4/Om9bFrcF5cs1qvvgSvM5fpMIDN8tCxzDcaHjoCiFkyn+psur/f
+ sOaHneLfdHgxRs7zcNxZfwrjryOPX5u2pl+78VmXvgsyvgo9uM6UChwcWKf7Sq7XWxf+8u997s72
+ 1jWHpvU169X6S/Dl3GVKi4cQrjp8JO11aGnPGxGHlw+ztPeh5jOtTjHhfdj50AgX8zcrHib8Mg9K
+ 2W8a49DJw2c2Jmkf85Aib/LnGPxw9ik8/joyODjAeu1O4+eDA+t0WzwcUBf84keeslof4uF0DRbw
+ mTgJ8I1xHArIRYdHjD4c4piAntdm3JnxhjU75BaHF7PH98Q+hfHgAFMnnJ43d/HpGfvZEXUcUOtt
+ fHm3tb4Gp9Izp62tBzF5HU4+pVQLnkn90AIg5ufLvPnQIp/gfgDRHHf6vWExHR/abWxvcsgIB9hO
+ HGBbB19CxvLv5yFbdD/HeFbGdu7PffSJ07T9l7ZWqxegAI/YdJioMFLsPkzqsMkvxNrh1Q81486O
+ N6xNh5fyzy8rd3Y+tl5NP74znfPKm7/ikveelY23z5M+OLD20wL9Xx++7Nw7tl+wNW29EBvnadxM
+ vOrwydVq8/FKFbiDN6xT+l6Zqje/gectWINXr849/JM3ffGlfzjXHkgPVAVyCzxQ/g/8vnZ9zjmr
+ D/0JLMQ34bh5ztbW1jkqSjuU/EYUpeI8JvIw89dxB29YqkP7co/yyRxeszcs2vcLMip7Fwr+Szs7
+ 61fffM5DXz89a3WsQw6e798KHBxY92+9y9uRf//Bq7d2dl6IDfH1+HmoB0uhw+g4h0+uVjvMDt6w
+ ol44XOrwQTF3ffmHOZaPh9iuw03FX9wC1w89PP8BiH9ie3341bc++7J3LCwOxPuhArkF7gdXBy6m
+ 137wM86Zdl6EHfNN+N7Uk+JVaVaY+ZuT36S0+XKlDt6wZvWSsOkQupfesDa+qcEfjsd3Y0lefezI
+ zqtvfdblH98d1MHMfVGB3Ab3BfcBZ1bgtR958Dnru/43NP//ii9UHqI3AehYfB9GsQwHb1h8Bbr7
+ b0B5OOWYdd00ngp/8GBwfHrwbWe988nVtPXPp/U5//zTz34Qf+7r4LoPK3BwYN2HxZ1+9n2POnJs
+ 62+gyN+MQ+pCHk/jsIrjiodUuw7esHgmtC/v4hCqL+Narepx0yF0koeX1qP5K04+BG//slCrxvn6
+ dBO4abp1Z1r/yLHtwz94+1c/5KM0P7ju/QrMd8u9z39WMp772g9cubOz/bfwd9z+PPr6SB1C0eTj
+ 0OIR5i/7VCgeXrl52nzhc7XikBOvCYZ5s9Mm77JQ9tf9buQHYMxrmy5kEYvRccggPGw+dMwytvpM
+ jsMhD4nZWKztoR8meTjlCJjy2zRu8tNo67HzB490nG+XDzP+0C4OWczvrNdHkeGPT+vVD9z8Jx72
+ ngY9eLwXKsAaH1z3UgWO/NT78aMI67+Nln4uvkeF357SN7G3Zx0C+Rk6Dp8MQZufQjuUtPlypTgv
+ 2pgQrr25Le0Wfsr/DGd78na/iqnczH+SXrhZehmgrOa3xSGx642FbvFH7jkCrzjbaH9EbLgW/HnY
+ nZKfTh+8u3g4XxHjMeQ8tCjiz860Wv/89tbW99/2lQ97a6c9eD71Chyny06d9GyzPPxT7//y1Wr7
+ b+OM+vI8o9zS8Zk3Dods8jo0UCjhUs8RnV762aFSZ0k9EDc/ZFKMZW32fU1Oih+BzXG749IhAmLH
+ IYNys+nQYVSuy4aRuzzy3zUWa3sI/L3ip9HWY+fHJOPWxfl2+TAbb1iUkQj+GBc0/w9+LOL7bvnq
+ z/jVZnrweAoViM4+Bcuz3eQl661DV33guVvrHRxU09O8yWLPoTb4chB3NG0cGtnEdQjs0rug2vx8
+ bIeNtkCulDY11TGhcfdhspefmp/x296niXkH/4jLcTS/s/QyQONn99i19+jNR3kzgg3Xgv8e+en0
+ wetDqR2ynM/1Iz7k/oZlmog34Ov1zlumaev7bn725b+ABTz4LRIu0t26H6fL7hbPWQU+8tPv+Tz8
+ FpeX48u+q3OvqADaVD5r3KMHb1izNyAUqW91Nl/JWchN46buCtyMH/XfdbjA9oR+TsQfcQpGv+2y
+ vxO+YUVcjg8B/cb26tC33fbsh/23RnXweBIVODiwTqJIBcGPJxzZvvNl+Ez5Lfhhz63abQRsOKy0
+ HTmvy9um3nByG5U+UCnHWPiiwQP2zHgDWvAu7RZ+Bp8JLR+8YakOi8Nozzc14Vx3rVrIJ37DQp3x
+ wUMOF355xPrlq3Mu/Lv4e4uf9Oof3E9UAXftiVBnux5f/h258n3fggZ7GX4h3oN5JujNAA/svTgj
+ Nh5aauIBQCXbl2+SFocPCDcfKgs/sCUuAtEKDTGWNfwKJ4RvJ8Ufh2LmOYs78+n8s0IAnXn0Ee7F
+ t2lM+01ji70eA3ev+CnS9tD5Mc24dXG+Xaf4hsUCgQXrtLPzyenQ9F03P/vhr8CCHnyZ2Gq76TE6
+ e5PqYI4VOPJT770azfVyNBN+i6cPiTqEoudUqTgtYtBnUrV5bm42JwjqsAgZEzLPWx0uMV/4hIWD
+ Oa7xLu0WfgafCS3b3qfJmHdejmxpp7hVj4h8kUfmozE2f57u3uQ+BOgty1gj8PLXRvsjYsO14L9H
+ fjp98O6Kl/NZV+JDVl+kyHllFgMSrcMNeC1j8mDE7zb7b9s7h77t9uf8kd+Q6cFtYwXcnRtVZ/nk
+ j/3O5UcOb/9jvLd/I75XxXbjadWH2FSeVrWWejR1HW4G4N4OF0k+BId905MP1zgsJJbDDEtxCafw
+ hBey2YdlTDOu4Xcjv9LtuN1xDb+sS9QnHGlzwv9shE5+N41pv2kMztkQuBl/+tvEjzlWk3jF3ccZ
+ cQidn3aJ4Xy75D/XGfPib4fZcIP6EacJAXHLutFOpFCvf2xaHX7xrX/y4K/7qCKLm3fEYvKsFv/z
+ +vDWx977bfghqpegOJd4U2aTe5PXIcQmywrycBgwTFMREyqo5TocdulddR1CfGyHjdzs8hMTwu0+
+ TPbyU/Mzftv7NDHviGPE5Tia31l6GaDxs/vYtcqLm5Zo8W0aqUd8wsVYh8yMOIQFv3Z/2m/ix5z8
+ b/LT+YN3V7ycrwzowPI9ecMindyRblp/Gs9/79YLH/4vpoPfDtFXRFWfTZzNwuGffN+XTjvb/wab
+ Bf+qTG4a7rHYXhxjk6pFtSm0B122pR7lTZ4AYAhePAVr8HPCXavNKpEI+7c/ieVQcTVFuJ/zhX1Y
+ ajgpfuXJ+O1/Fjcd8YrRccjA87j3w0b+sAMrX+pp3kftVsxsGoHbdQXuXvGzixwTnZ9iYjjfLh9m
+ sc6YpzwyKxrXg/0gXgGNC7mm05Pd/Pb2tP7m25/zyIMvE1EtXtF5Fs7a+2vwd/5W2z+Ipvmz3sw+
+ VGK3oizonjgNduujaqV3cx+8YbVu0m4ch5E3edZpwwh8HXKoqzd52Dfaelzww0DrdUp+ihQPe/Fw
+ vo7bwPEwwgc3lNQYnVkMCp9656N2SR75CXeCDx54orODLxNRBF6s79l9/eT1Tz60fegX0ECPc7ex
+ 16P5tFksq2/UZZTdRd5UllXEpZ7NySbmvAG4W+4tX3rZN33YOZ6FHzDJTkTmD/fDX7O3f98HX9ox
+ zgU/Jua43XEBIELHIYNyo8MC+tkIrfxsGrVpwbdpLNb2ELgZf/rbxI85Rku84u5jo63Hzk+7VHC+
+ XfKf64x58WcjSU53qB9x4g0FcSHXdHoqN3yA3c76evxWiK+/5Wsffj0mztoL36o5e69DP/7ubzm0
+ c+gt6PPHae+hN7xJvTnZO9qMflDXzjax9FE/EoSMQc3JCdsTo+0S/KlP/uBA1ya+j+KjOa/0o2YP
+ WX7kfre9cGTwNYsfU5bpF4JgmYcdaj7ycBwRSMVBO2gMtJPgpaA8FnmJB7rZGPYzfLNb8qe8F955
+ wf/J+Mk4Mdal/LweGSd18idQ1scedBht1GNS5eEnhVjfkEPRB8SbvHJCRhGstlZPXk/bb7nglz72
+ zak5G0dX52zL/LXXX3ToztWPo/me610TZXBvqCmzubwXSzFvLjT1bK+qydnUgqn5ksclNs+uzUQD
+ XjJsmyTmCx8w4QRPR1aU386XOPLHNfjSjpvJ7gUJojlud1zzOLQL04XeJGQfh47fRLIuG8Ys5Kax
+ WNtD4MSLeGcjYMpn03gq/MEj77Rvl/OKwwjzlPOQIWy4Q/3wIT3LnjgBAsdpRa4H3HiZz36sB8/r
+ brnoyDdOz7r8FmPOnrs79uzJd5p+4t1P3dpe/wx+Uv1x7oXctNE0bH58cLMNPZpmtom7PopX+mwx
+ dWU/BQBsmz4+c+amzyWQXwrk08B4SpzFEQAMjXdpt/AzP4RItylfz5tf98D1edcn3DkuQ3ffx64V
+ bmw+iED3LS4ZeMXVRtWDPJuuBX+eEqfkp/MH7y4exZGRwyBk9U2K4ol4I0Hz+NBi3SirAvJjGrMi
+ /108MghW6d+/s9r6+tue84i3afIsuZ09XxLiL/9t/fi7//rWsZ034ueqHpe94marnlMT9c+Eu5sq
+ mwnNNnqoui16D5vQzWh7dtOQsyk1q0Ci23h4hNzHWfOGA/GXgnax+Zf29FunCsNs8TMqybbXLhHe
+ 846P99hkgedMOmRWmw6THj/1XXb+ES/NRScm4xKfI31EXnzU1fNMXI4AVJ54nvnbZBd48eaNuOBL
+ e6oyDzwJaRn54aPnMfSAQeF4og4hh6IP4rEf0eNGP61+8kMZ33Pd3n7j+b/40W9P5NkwssZn/vUT
+ H77s0M5tP41V/srxhoG01QsuAXqTTYAejebTBDAcCQx5tz7KV/pssWYniOXyn9tI/MGBgXrDPRY+
+ pnscA4fNAjuns7Bb+NmMs/28HhlHhjPnH3FYLzkedw2x+aPAGGJzA0gviruP1DOfNtofkRuuBf89
+ 8tPpg3dXvJyvyPEY8ji0kiTiDXg/tLjMlMUjP6ZxPaI+YV4VEp4S9a4PPUF+3W1bF//F6Tln/j9H
+ Fl2ZBT4Dxx+7/plb6+knsQGuyB4bm1arzVPCibtbvFnYFNo0VJdCvaNmg8XQR91CUXo2VfB0B+Uf
+ k2pZ8YsQE+FXouMqfIQJx6JTXCYIeQNf4xEo5O53Iz8AY975z2URidJxyCBdaDMKn/lwhFZ+N43c
+ jCrchrFY20Pg+6FW/jbxY07+N/lptPXY+YNPOs63qw4hrjPm6xBSplrOSIv6OGQMBBoP8lMDrIK/
+ 3PAhDm/y46N4IOeF37f1kdXhQ887079EPHO/JOSXgK+6/nu2ptWvoZGv8KbinuAhwhG3ehiy9WgK
+ bR7jBWyyzdhsYceuKX3QshnZXHIkQMmylz75qceF5k18H1uYww/tS0G7FOl38LK5tSk063mbDZwP
+ VQCoCN7hn3OOq9ulQ7HE5iYyr2Fv/10WD4CzMeokXNYhR5JWHcPDCfDkmfGnv012onc9gt3+wn/y
+ UJd5qE4loz74EK7izPoCBIXjifUNORR9EI/98M6LPGEniX6GTAQvfE/2iunYMX+JiN737Jl3PzMT
+ ++X3nbv1B3e+arW19Q3+DBZpeu2jedhEWFB3mVd2pndTeC+WYt5c3BRqvmgMEoYsWjVxby7z7NpM
+ 2eSyD7+gzM1ReJrzCrz0Lf5wX3YznC3DfL65NvIrj47bHdeMf1YIlCE3ex/h3XXZMKb9prHFXo+B
+ u1f8FGl76PyYZty6ON+u/fKG5Tqw7jzi1j9965ErXjR99erOFuoZ8Zhb4IxIRkn82DsuXK0P/RJ+
+ XdWz+mmiTYnVnG1O7R6XIM6K0MchU3p2AXAauIm7PkpX+tyM5A07QSyX//jMmZs+WOTf8IwrD41A
+ lB/rbd/zWtgt/JR/8uCy3PMZ8wboHjjjPQ/cLD3bGb24x+bP9fAmzzptGHOd2qg8ybPpWvDfIz+d
+ P3h3xas4mG/EE7K/XMNshZl6QFkuKPLLOS1j8nCUXgNYiVvyBEBeBw/Ecc15/vOt5x35mumrPuPW
+ ATj9n86sLwnxGxZWO4d+FV8GPovN0ZvccjZNNEPritE7rRnUI2y6aJaQbTaaTG0wbxY1He3k1wDx
+ uGnhHx+8hp7qgc/5whvuwBkH7R0IDbVJKq7Gaxw1vgbf8O9NBL1g5h3+aee4On86FIt3nx3EfdiT
+ b56XeMTKPOx2I77ZyV/3oAWL+iUuR+Aqz+TPcZNd4DGMS+vZ8g5NxunIwz/rg49ZfZQZ9TCEwvFE
+ HUIORR/EQ1cRJp9EkH7tZ9STWF7Si7fq+azzbz/6q9PPffxyI86MO2t8ZlyvfNcVq0MrHlaP1xKj
+ ebXqHHHpTQJdMN4wMGlg6BMezSd7GRoY8u43EpnTgfjKzO3reXsQT/nfpTcP9Ya3uDGR08NPx/W8
+ FnYLP+U/CHfnM+wdR4bDuriOngcOE7O4DN19j82f6+HNG+UHWrx9zHVqo/MmcsO14L9Hfjp98O6K
+ l/NZV+JDHodWkkS8ATePDxvWTYcMeeSnBtQjD7/OEzi6k373obWLx/H/znq99WW3P/eRH0m203k8
+ M96wrn3356AB3oA3hcdXc3mx3AVc5GqK1gzoAS1yDtFE+druXqpuU4/O+cvcD6P31FQ0cFOyRcxT
+ 9pJpFk1NCJJIuY8tzOGH9qWgXYrmK3vhCPTFeZsN3MEb1mIdWCoV1OuherX6RSU1uM7A4aPXFQSh
+ xwCF6x7rG3Io+iAeGo62IE/YcV5+hkwsL8UhXtGFrInHT9P2G8553Uc+x8jT+46MTvPr1dc/fbXe
+ +Y/4ntWltZhISW8q0XTMUG8S3Kw8FCTjVg/Uu0k2v3EAKEDwsGlCVpfagfjGNEsbdtS7nYf/kMUr
+ vW/iFdxLM4s350UbS0eHEY/TWdgt/Ay+juv5jHlFZHHELX/QRKIpLvOQbd5yHWKsQ5M0+KO4+5jr
+ 1Eb7I3LDteDXbkdgp+Sn0+8VL+crcjyGPA6tJIl4A94PLZUveeTHNK5HHn6dBxrhWS/qdx9azrvx
+ AK/1lt36xqPrra86+rwr3pysp+N4er9hvfpdz8Fh9atYvEv1GSYWUYuuJmiLLJmL2ZrBQK2b1lTw
+ 1gwhqzl32aMLB311ScDUVAQ4Lrpw15Z/yY6nGic2mdAKaBGvFfZL+4qfflKkn8HL5u6HSfmf8dte
+ fMGbcRvmPOzO/OlQknef/OZt2Nt/l8UD4Gxs8TBe4XMkaeiTP+UZruE5P+NPf5v8iD7ySgfEBV/y
+ UJV54ElIy4gXH8JVnKkHDArHk3kljxShz3o0O3sQQfrd67CSftA5zlk9pksPTzu/et7PfvRZoj1N
+ b6fvgXXtdd+wWk8/j8PqQtbebyZoGi5SytF0pYdi6DFrINXsTcnWm4f65BVgZs/uCLskCBkDetSE
+ thdADsq/9MlPPS7Em/g+io90vBgo/dC+FLRL0cCyF45AX+VfCdu/NgHNBDNR2YtuUVdSVRy0Y0D2
+ SxWvYU++eV4KG5jZ2OIpfLNb8qcsP4nLMfzP+NPfJj8tXjz6Un4t75rOPD06T+DwIX9Vh9TDEAri
+ XGfWI+ujh5CzHs1OPim3+snPkCMs13vQhTxwsR4XrraOvf78n/vgN6Td6TaengfWq971HVj+n0ST
+ HM6C12c6Ni0m/RmHzYFVLNnz1mOyHgImOO3BTruQ46Fkm6Ve9CYQPmjZlckjyJBlL73jCQbAwy8m
+ nI9H+wtUxqWuFxA32oXfni/VwpHBF3nNx3iS3/ZSBO/wT5Tj6nbpUCyxuYnMa9gzsHle4gFwNva4
+ E58jSUOf/CnLT+JyFDzyxPMJ/QS+uPlAf8GX9p523VjZISM/fAhXcaYeKChc96hDyKHog3jMK3rc
+ yNPqJz9DLpTijbBp1eInxv75sDoXP1L9k+f93IdPy19Tc9odWFuvetc/xv8J/Mf4jMH+qMsimoZN
+ hlnJsWgEWfZoPSbrgfOWbW8e6mXnB+Gt73xkx0UC4YM2tontBcCtxSd98lOPC/Emvo8tzOFHuwA2
+ EXi4320vHBl8zeLHlGX6hSBY5sGJoO9xCxgK5ktQqzNFXj3+ZV7KB5jZGLyyyzrkaELex3UCfOUJ
+ ixP6ASbjLQfkD/9pT93AZX04on74EC7r48pUXR1PrC+A5tGDApQ78nc7OpTc+gJyvalJ75v4Bp35
+ W/3sn37xBz8Jj+FHzvvZD31XozgtHk+rA2vr2uu+H2vwHaosHvqlzyhcTC4SFPkZhk3Hy7JH6zFZ
+ D5y3bHvzUC87PwhvfecjOy4SCB+0sU1sLwBuLT7pk596XGijxPexhTn80L4UtEuRiQzegzesqAvL
+ 2+uigu2uN2unq+llV9OuLyocMI7gwUfnH3qpta7Hf3PPOJM3HMpPi1N+hlyoZf+1+IkZfQuBxxV5
+ VtPLzvvZD34/9afLxchPj+va6/4P/FPL/9TF3h1yfQZT83BNkFosGtHSQ+6fadwLLsGAR/NpQoa4
+ oYlCLj+lJzuu0guNKNwUmjcA9+Z/l14gxWd4xsV4TF/zCsd68zdexiF82C38jPw7zvbdEXEmigGy
+ 48h5jFEW42Le8Pk916FtIqLFt2nMdWqj60uLDdeCP9e9NilMNvpr/KoLefoVvLt4hEtGGIQ8Dq0k
+ Cb5I1DyoI/Bql+ThCLoYUBfok1ZURRAS9eZJT1YseORn4CqP9KsV4D9Bvf72O//0Z/6zGdc+FViX
+ /X+96rrno6qv4T8P78XO1V2E7y7QptcSh+xNjTTVBdz8ufkwZ6BrMNN7E6uJyEOg+GowT0yLYKln
+ U+GjNn8EUP7LfeQh+/ALXdoVPmCKI/Ut/nBfdjOcAvRt8JlwyAGqPLJOzn/gWrwVRy8EN1/Wr43A
+ KtxNowtNwyhwGyOs2RC4e8XPjDiEzo8pxq2L8+3yIRDrjHnKSIBPQgWN68F+0ISAxoVc0+mp3JjP
+ fsiKeiaPPPgmPdwWjx5iQu5oZ/14oAz+Nf796Wn9wrv+zGNf0yj35SNz2N/Xte+6Bl9z/wr+IvNh
+ r8bmkL2ZYjGREeWOlx6LMzYdQOoF8w14O2RKj4fZJjaP7aN8pQ9ad4XtBKGf5n+X3jyKm4/k05CH
+ hsRZHAHA0HiXdgs/I//O3/MZ8+a336XdqIf1GW9I86FvHsQ3Nh/SAVJl7mOuUxvtj8gN14I/1/2U
+ /HT64N3Fw/mKHI8hb34zot5w80T+SJyyeOTHNK4H1iNpMTaCkKg3jybytuSBrHWTH/sz7zx+x7E+
+ Nq22vuqOP/WYX026/Tju7+9hXXvd52Oxf5GHlYoaza5CxiJkUXPxOdaicPO2xaJi6GFp4KArfTRD
+ yPOm6vxF7wfhs5XdFI6LLoYst5IdjwLgreWXdrN4iWE+9EN7ESVvipQHr3EE+hp8A+fmh14w8w7/
+ tGuHTNa94qCaAZmPaF7Dnnxjc5V/YBR+jmEvu8TnaELex3UC/N3yA9aMtxwov5Z3KAZu1E/1wXoo
+ n6pD6mGo8lB/vL7KejQ7+aTc6ic/Q46wHH8sQ8XR6lf1EJ0QZke8lEB7eL2z87ojr/3A05NzP46K
+ dT8GNr3ynY9Fo78Zv874IdoMKH6NGwL2G0A0BfSUOz4/0xhHPUC1eJbdo2gGNoXsRWRgyLbveqh5
+ lT5o1QZwIEcC4GY7t2AEUHpiCOe8HmJgPJ2GeS1xjTfsK89FHDU/w/V87H/EoTAUl+No8SkO60ee
+ IfchNn+uR20eYMgm3j5yEyE+4WIUP3k2XQv+e+Sn8wfvrngVR0YOg5BP5zcsZaM8pk+uDx2++s7n
+ Pfr9vRT75Xl/vmH96A2PQIF+BcE9hM3CZp2NrJ6LW3WUHu1fzRV2ibM9zbAZ0rwequdCn/6M17YS
+ X7dnXGGX8YQs2tiGjksA3Fp80ic/9bgyTzym3SxeYjIOnVohi1fhzOwEF44R+Rp83vyWGRf0gmVe
+ qaddi1tATFUcVEfihMbV41/m5fpEvMBLDl7ZZR1yJGf6Df6U98I7L5glf46b/Ije+Sa982t5hyLz
+ MjPDsodxWCWPx1ALt5/fsCqP9fSQ6ejR/zj9wu9yD+67Sy26r6L60Rsunrbuwj8UsfVkNV1uhj5u
+ CNhvDjyM0GTQ6w2B3UK7kJPPekzWg2GG0375xhE8GpZ60QdBDYpDDsI/NHLoOClZzviCxXFT6HEP
+ sRwov8LFG4lwLV/KCz/lf8a/zJduzCPz4HW5ch5jlIVhZLx6Xt5yHWLUOgAjvk0jcIqzjeKn/aYr
+ ePOQyfGU/HT+4N3FozhYh4gn5HFoJUnqIbNcwOWhxfJSDkUfwEoc9Z1HBpqw3odpIqyAQcBsP+pI
+ feWRfiP++TyBwl5357mXfOH0dQ+7Wdz75La/3rCu/eB509bRX8QPhvqwiiKzWbW4ObJ4YzVVylz8
+ WfHdFUOvxWzN4FUNPYbSpz8vcu+COX8Lg/HIXoOajk+Oiy7YfZbltjULtbpafmk3/AUm/dC+4idv
+ ivSTcdMrFQT6GnwD500EvWAmGv5p1+LOulccVMMw5wnHNeztv8sKG5jZGPbCZR1yNCHv4zoBnjwz
+ flhK3mQHXcZXDogL/8lD3cCN+pF5HDKeB1JUcgcCx+M6MhDz6EGByR35u50ZcA+70OehJ3XcxDfo
+ zN/qZ//0SzoAy0/uBymsJ+dq+uxz7/z06ybuyX107Z8D6yVr/JjVLf8OAV3jmrIJWFuvwmxkAVX0
+ UUm/EcRnlGaXONubb7ZmwZNrSJyajxMAJq94Qh5xtTAK71bINxvbK2DcWnzsWlxDT7X1fd7xbPCj
+ rhcBDTO84ktexaEuJav9Vfwl2575AqEEyl5htrhVKBkaRxPtNudDkdewJ988L/kHZjYGr+wSn6MJ
+ eR/XCfDkmfHDUvImO+gy3nJAXPhPHuoGzvlaRn746Pyqo/C4QeF4og4hh6IP4rEf3nnRT6uf/AxZ
+ EKIUL0fRhTxw9m+91ku8thtxE29G9+/0ZedcuP0zE/fmPrn2TSDTFe/8l1ur6TkqHoozPiNodSWz
+ mv7MJMCshJ63XkuMZvPqkdF8XKPBi0kDQ2/Z+vATeAHF1+3JT1nmfghZtG7fEW+Th958waB4M78+
+ Cr/0o10ASwXAeDMdA8teODL4cn7Dr2Xbu4dNVPaiW9SVVFkPPmuX2C9FXsOefFHPmHf+ES/mnJ/t
+ ZZf4HE3I+7ii8HvhOX/SfsCa8ZYD5dfyDsXAtXjh6Ux7w9KbHgqI/7723Cd94IerLg/wA9f0gb+u
+ fddfwcn5r9kC1WTRrP7M4ab3Jtkccn0GU/OAB/YdLz2bmLz0Mxwp/wGP5tNEA4ZcfkovcxNmmDTL
+ TIjTZYflf5c+UImPsfBFgwf5iQnhel6eL7uFn5qf8dt+FCbqx5DKTdatJiIOx+2CxvNyiM2f67Hn
+ YRLupEd8fRQ/eTZdC/575KfzB++ueBUH6xDxhDwOrSRJPWTAzZN5WQ5FH8Aah22lywcRiNh686Qn
+ K4ALmKJDXFpvxZf+m9+Iv/IL3EgrA8BPlq5X33r06x/3b2b+HgCBeT2w1yuv/yz8RsS3IJALajMh
+ olgij4vmJS6bchZ8zBfPEqdVxJqSD4ZUjwdOWLY+F9t4AcVnO9uzaYIHpt5UNYCOfuIwoD4clH/M
+ JI/Vzqv0CpB8LV7RMFDP9/gjPOEDMHB68m3wDZ6qByGVZ/p1YZZ2wgnuuFMmRT9sZMfNw3niN41Z
+ yE0j8LuuwN0rfnaRY6LzU0wM59tVhxAQykv6zLBoXA8giA8gWIALuabTU7nhg+3MSj/Bs4xj0IWf
+ mADOcdod3crvbH63H9ErH/71nem21aH10+/6M0+4XvMP0O2B/ZLw5R+7YDXt/Ds09O7Dqjapi86m
+ Z5E1sliU2+V561X6wCfO9jTLTQhjA8UiOsjWh5+QBRRf6j3SXnZkKH3QuivEJwdNllvJwWPALD/n
+ Y73wmW76oX0pGG+KBpa9cAT6GvkPnA9V6AUzUdkLtqgrqSoO2sGQcruGPfmintCXfzwr/BzDXnaJ
+ z5G8C/6U98LfLT+in8cv/vCfcTqMxHmUf2TiT07Ojzg8+c5B5WHfRR1CDkUfxCPrdCOeVj/IxSMP
+ vrkOLpPi1fo0O8ieZzx6kmHVSfUlPvjolzD5x797uJouwM/C/8z0Sx+7wIgH5v7AHlhHPvGvUBW8
+ YbGGKBZH/NGYMovLYvfRBrzXJT0siyfwXhzzk3joYWpH4iB86NNf2FnR9J0vQpA/zgdtZOK4iLGD
+ 8u9uUDzBAMPwS7QC8tjCLAds2hE/7VKkn2YvHBl8lf8Zv+3FF7zDP+0cl+MwfzqUxC4PvnAzi3+Z
+ l3jECrMcWzyFb/VY8qesOBOXIzgrz+TPcZOfwGMYF3HBl/FSmXVx5CmjPviY1UeZUQ8jlYd61zHl
+ UPRBPPbDOy8TpF/7CR4DjFK89ldxbKqH6EZGVacIVIO8Mn9S08C8+CHuJx+55Rbs2QfueuAOrGvf
+ +SLU44X+DIXasLioQ5ayZMxz1YXLkfXifLvMY5zXxHaJs33zMxyJRXRcI8WR/ixr0RRH6mNe+Aii
+ 9F5iNiefHBcxQ3aelJNfjyPPNu94iAtM+iGfE4WCflKc8yqO7MLgtdnA+TO2aIgQUcZtv4u6ApUO
+ xUL+CpDKaTrv0Gr6zsecO7356RdPz3vYYai7P3nZtd60Ey7XOUcreB9X49vUH1U3WLjeG+rT+DO+
+ ckD+0Kc9dQM38iHzmf6GpXIgz62trRce+an3vqjqdD8/uOr3s9OJ37daH3sL/o7gBfoUtKE5skk0
+ Qq/PBG0sux77kifk2kzMFoTVzCFv1scmDXwY9sE80kcQ8kd+waKJzWOEHZZ/hyMe6TfFD0Xhac6L
+ OA3pKOXwG/oZTha+Db60i08WFiuBOW6PQ6viGIX42ocdmf7BY8+drjhvfD78zZuOTX/zvbdP1926
+ wyNxfohQxuGgeDeNDnt+D9yyL+qNAeiT9jNnttT5MYPoxnw+Y7Q/1o+HVuRRnilnWtS7jwMINCwE
+ qEE8oh8OhRt5NR4BfXMdGs9wbDrIjk90BuKxeImP+Fy3CEDzc15obju6Pvzk6fmf+SEY3a8XY7t/
+ r5e/9ch05Jy34/Xys1zk2CyIwiWLEZsu9W3VvfobIvbmisWEnnLZhZx88uNVKb4Bj+bThAxxg0XI
+ 5af0UPMqfeYRDjivy3IdAm6f8h8gx00h7ApfNHhQODEhHPLG6Lw8X/LCT83P+G0fBI429DJXOHP+
+ nm8YTE84f2v6oSvPm65+UP3maqnyhm/cTq/5+F3TSz9wx/Spo9ziSBN/do3cXMynjfZH5IaLm4rx
+ LkbZA76LH3Py2/jT34w9+HbxcL4ipwPL49BKFs7jCrh5Mi+alUJ0cic4+zhoaT8IQqLePJrIWxAU
+ Dx56XpUHAS3++TxUGbZwJPdEjx/Pbz566OgXT//zk+8i4v66xqfA+8vjkSP89cY6rNhkLAKvKhqe
+ q5livnAN7yahpS/zRDNgSnI2ccnNT61Z+gdIPd+aIeTeTRUn45be/hVPyBiwxHbguIgZ8tBHnEFR
+ eRK9rIvDpMJ+ySei5E3RwLIXjkBfs/gxZZl1gyCYectedIu6kqrimKZLjmxNP/D4c6c3fP5Fex5W
+ NME3bqdvevg509uuvnj61ivOnY5ATrfDH/1HX+SY/jjm1eqzCV95As+0juuH+uBLeufX8g7FwKkw
+ YQccPuSneFIPQ+aJ+TpkQg5FH8RDV0UTkadf+4n6REzGs262qzha/ew/eLkvxEu5x01ek6p/CSsc
+ 6Wd+n37k2JEfMPr+uyuk+83dK9/5dXizep2KFMXUZwAEkCWsselVRRaZ1VSxd0dsnmgK8i3w0nNx
+ yCs9bvVgWtNTb9zQh18NS33EIn/BQ/7MqOJ1ZuV/l948ipuPYVd4mvMqPzEhXM/L82W38FPzM/5l
+ vnST/Ok265bzq4mf7f7CI8+d/ja+V/Vgnj5383r/bdvTd+DLxP9y43ZG6THXqY3Omwu24fLCjf4I
+ uTYjTBidlruPjV91oV2/9uIRLhlJaOZxaCVJ8IVjx4M6Aq9lTB6OoIvB/Ze0oiqCkGCPD/LMriUP
+ 5J5X1UN2I/75PBiDdvB7wrjmFzzb0/pPbT//yp+fxXEfCoz6/rmuffdjpvVdv4VCX1rN51WLTRjF
+ RTRZyhqBU7Ha6NVdhB98s02pRQxcEA49nKkXNulzsRleGQovN4xT8Th8FTEUpWdT4cP2RJin/GPG
+ 7tM/xmwyokkUYw8Tiprv8Zff0M9wsvCt/DeeXfyYmOPa+sDu6Zccmv7ZVRdMV114qDGf2uOvfPLo
+ 9F2/c/v0/tvhNNerj5toQ7/sC2+qqCvslFcfO68KRsSGq/NDXSjOt6s2MRBcFcq5zoQNd9THZjfQ
+ OAECR3x6KjfmG3k1HjqIS3rwBp3jaPmVfYVnB2N+tx9RK58FLxRph1+vfOOx1Tl//P76fpY7P7O+
+ r0Z+3+qcc9+A3xz6+doEKMJshF+tYR9RbC9CrkIbN8TpzRWLSZ5cLI4hJ5+WphymPhaFftl8spch
+ brAIufyUnuy4Si+0mrPsDMA98iY8M6Zdu+SXcszbX4nNT9gJ13iXdgs/gy/z3pQv3SS/g0u7R553
+ aPrex583Pffyc1rU9/wR39KafuSjd04/8KE7p5uP4Rvz8F/rpV2IeGLz7PLW9Yw75NxU3opajayG
+ R+B2+enke/FwvpjwGPLZ+IbFcu3srN987H1P/ILpJasdyvfldf98D4vft8JhxUTYRNyMszHmuUWq
+ FYCTvAfeTUJGX+KDRTVp2CXO/uzfvLCrh+q5sM/4Il4CxdftmUfYMYTSZys7E8clgBxWfMrUfNTq
+ yrpASLvCk45X+tEhGrLyznQMLHvhmKivwTdw/swPvWCYV16pp91qOhdzL/7M86e3PuPie/2wogd+
+ RfnX8H2tt+L7W9/4iHMjz1gHAph3v0JWnlm3HAWP/sEz06K1xk12gccwLtW59VNosq5mZFhmHodV
+ xukx1ML1N6y0q/UE3HE2O/mkPOpgP0POgF0Hl6ny3FQP0WVFHH/h5ceM9KOAVLnkHX6zDopna/X0
+ w0947/dkLPflyFjv2+tH3/W0abXzptVq6xAXZ/kZLT9zZwlrjGJT70Vt44aIzcMmRVGhn9mFzCKX
+ v+FIbMMN7Y0LIui5ePRv3rle5k0vmOIoO3sQT/lXNwRvUHBQ3H7QbOEZL68WR8kRb+UtWNRh4Wfw
+ mdDyMt8exzR9Hd6mXvqEC2Y/piDf9+Htulu2p2+/4bbpv92Cfx6Buz4XaJPPrk8cRq038KrLpjH7
+ oY0z+uDdxcP5rCsNQlZfpMh5eY6B7UM/+NAYsnjkxzRclc08MiBp6M2jibwteegv6iA7+XccPf7K
+ j/YCxpBy5NHjN854zuNp+9g0ffb05668wdb3zf1+eMNa/2sskg4rpsDk2HyzMea9WFFK4CTvgQfB
+ rCLiy2ZofhJnf/ZvXoDqgfOWicumoj55BQjZZswj7BiJ4qxBTUWA7QWQA/MD15qAWl1ZFwhpV/hM
+ N/3Q3oEATT8pGlj2whHoa/ANnPOFXjATEcfvT73+qRdPr/qci+7Xw4qRfvZFh6ZfedrF07990vnT
+ I85FmzLBfoWsPLNuOQJXeeJZZcpxk13gMYyLuOBLeyrlT6hRP3oYh0zGmXqAQeB4su+TR4rQaxCP
+ /cgJbuQZfWQ/Qy6U4iVv8LT4ial6iG5kNObTjxnVn4TJf/IOv1kH49hB06HD6+lf2Pq+uyuk+4z+
+ R/HT7KvpWvHjgdXME382AsBAXLIYs1nCjs3j1dgccr0pcHHJt8Dv8lcOzTfgtHecQeTIBDDvXA81
+ r9JnHuGA87osO07mu9QHKvExFr5o8IAElR9NhIu6SjSw7BZ+an7GP8/3ksOr6Xsef8H0Fx513oQf
+ WH/Ar9u319MPfeSu6Yc+fMd0J3+Ya3nF5qz+aJuV4c/6qssb+nFGvRcP57OuNAhZfZEi5+U5Bi4b
+ /eFDY8jikR/TOF7igpY0mYH8UBo8UudtyUN/WOc6XCiTTTz2RFPHlfOc4GyTY8K4wQfDBY5/SXr1
+ F7Zf8MRXSXEf3Bj1fXPxVx1Pd34ABXvoLgfcLEx2MdZmgkFviSx6H8u+kwdf8XR+4mKNhj4dRRlm
+ +lxshlkKBSZa0uEh0yC986kB7qDHh+0FwC14CZdkHmpt3/Tya73qEWEKJzgmmqLHVXyJ04RvI38T
+ pnwILzJ/8VHnT9/9uAumy07hxxSai/vk8aN37Ezf8/47ptf9AX5WMQqvTaR1yPXCCO+z/ulyLlgf
+ N0Xb+cNeMM63qzYx1xXzlMfKVpiaz8MqgMbJT+Bor8j1gBsv89mP9cVjgFHg0foDXnHkBFmo14hb
+ PfT53X6CuIbiFU/WOfJV3vpHWT+xfdH02OnrrrpPfrXyffcl4erOl2IjjMOKxcOlzaviQs4x5iN1
+ LxGb8Dj43LTk5CVeWOTmSz+Js2yceWFUD5y3bHvzUC87PwhvfcxLT++41Bw1oCdMaHsBcGvxSZ/8
+ 1ONa1INTwx8lXOmH9hU/eVOk38GrOKKZct5mA8dN8oxLD09veMal0z+56sJ9eVgx9kfhr/q88skX
+ TK9/ykXTlfHjFKpv1i3HyF954nk2aqGjPgs8fdSlgvb1sma+nlln4PDR6wqNDOQOCq+j+Qg0jx4U
+ YK1ftzMD7mGHJ/sZsiCcD4Li0cPA2T9xpFOkMh3zUlhPPsZBWIunf/J1/IkTUHz4OcuHHr5leqmE
+ ++Dmqt7bxPxG+7Ttb7Rv4o5iqjosXitulnI2Qq/PEG1U0WnXryVPyG0VojmyuWAsR8HDQeFQ78V2
+ eKUIfZoxLoevMOQv0sHE7uYyTzVJuU//GEFY+shvyPJSDtU0Lf5wL/uKh3EET1jP+B+BQ+D7rrxo
+ eu5n4P/MnUYXvzL88d+/c3rp+2+fPoXv9s76A3moLJvGXLA+bso79PVmkhjOt8v66BfMU+bKOwLK
+ Xq7CaUJA40KuaUVe5vHAPkNfSOJoGWJd0rt9jBNhTABV9hWe8xjzVpRccSQusiJv58tKt3mot4+t
+ VvfJN+DvozcsfKOd/1ewX23zcRW92WIErjYlnlU64HPchIdBZzcfLIqH+ly04Cfh0Jcj8Yiu9Bmf
+ 8YpIfN2e/JQjjNJH/NFeytMe5LD8S5/8wZF1gZh2hV/6ob0LFLwpGlj2whHoK/nOx9d/L378hdNv
+ ffGDT7vDipnwr/m8ED/+8LYveND0rY/CX/PhxKJ+Kg+wszEWTPVZ4F2huGs9Wz/VtOuLFdKM6wwc
+ PrwcSz1gULjux+urjDN5w6H8hB2mdn8SNM750E/wtPiJsH/rtS/E2+fpl36Cj3omVDiKLY4AMh7z
+ hZ3nDx3eWd8n34CP8OzsXrnnN9rbYbGLN4rp6mo1lXQVFQZa/ByB92eQMapIUbTiD97i6X4IYrbV
+ PLl4nI8yzPReHKdRirCP+GA3S1P+TOf42cRjkTOAis/hqJnwGIbhV6LjKnyEmfFy3oVKXIoBjLyE
+ kwPfnvvw86eXXXnhdMX5888pDXLaPfKv+fwN/jWfTx3NZd485oL1cVO2oR9vHAHifLusj3XGPOVc
+ Z8KGG6wrPqTn8iROgMBxWguqB9x4mW/E0XgMMAo8agfATc8HPMkPB9qFWA99frefIK6heMUTfBHf
+ bj/IZDXhG/BPepUI7qUbY7j3rpe/56HT4TvejQ0yvneV7FE8bT4V14snGRgVo4/Aq8hZ9D4mZxvN
+ E4tJngV+5ld63LRGLsGAR/NpogFDLj+lB4ZX6YM2M+K8Lmdoe+YbGZc+UCnHWPiiwYN6MSaEY7My
+ 7sg7Rrfg8HMVflTghz/7kukZl927P6XuyPfH/T/hr/m8+L23TR+6AxsmNynrgT+uRxuXfQh5dlFm
+ XZc8wiUjic2sT04piij4rA6e7GualUIBUjRrO1yKBxrhGX/0eciC8BYExYMH9UXgKg/JI/75PHnM
+ 6PhErAnjHH9M1Lwe4pZ8+Gs7n9i+c/XY6X+5974Bf+9+SXjkzu9FzD6sokiVSCtaNkGNAGWSsYRD
+ pl00TY0kXfC7uK25wi5x0mvNWzNozbw6oit9LErIvZsqTvG3MJos2tgejksB49bii64YeqpHM+T8
+ 8EcOXOmH9hU/7VLMfGIEjr9N4Z981sXTG5/50DP6sGJ5vvIhR6Y3Xf2g6e8/7rzpYvxMBqvg9dhQ
+ nw31JocuFbSvV067rliIgNnDOKyWesCij/obltdXitBnnMlrf/bT+gJ+iychGMU36EJudshHdVC4
+ epL16K/II8JnPjKoPCnO+UhgHPl8dT7MPnTrnJ179Rvww1N6PNXxJL/Rnif+bITPLGGN+ZkNo3dj
+ GzfE6DeMWEzydbuQVUzySsatHohPN9SDRxMNGHL5KT0wvEoftJkR53VxNK/c7tIHKvEx2p/pTcNA
+ Iz9OCNd4mx0f/9KjL5z+zhP5f/7u3c9NjnZ/3//grp3pe/G7t17zsTsVqOue64ORmxhF6uMsow2H
+ llaR87l+NAh5HFrJQhyugHszpz/7F4/8mMZdgrhoFuaNIOioN48m8rbkgZz5EWL/Hnv883kCTcj5
+ eNLQ44+JmjfO984Hhu3tra177Rvw92IX7+gb7ZVkJRuphCx9NAk3W+IrScCzKXIsXMO31ZQD80Qz
+ YCb9JM5yX7RyFPaWHUfEhQCSVzwhj7iol7kfpM9WoGLkB0DJso+uMH9wtPxy3vFs8EN7BxK8KTqg
+ Z1x2RG9U/+eTLz4rDytW9PJztqYfxm+U+LXPv2S6Gj+2cR6+Md/rWn0FbM7TThcXNtbD65XTrq/X
+ M+3Aiw8vx1IPOyi8jsfrq1i/6gv7s58Wt/wMuVCKl35aHyz6abTLyGj0F+Mmrxn95sRnT5h3+M16
+ GUc+X3M+zK2nQ4e2t++1b8APT+nxVMZXvPO5q63p512tPShRPOrzxJ+N8EkrlyzGbJawY/Mcj198
+ uZjkW+B3+SuHfCA+6bEo5NFEKBhZyOWn9DJv+swjHBCny7LtAc+MSx+olGMsfNHgQeHEhHBRV1D8
+ 0QsOTy97Ev6CMr6xfnDNK/CxO3emv48fg3gtfuupVmNDP84stEu9SYWHMu3iyXDioBmHVrJwHpfV
+ aF8easEHIm96PYhO7gQnjnoa8yqCkAaPJvIWBMWDB/VPEM0PE3kwH3F4cjx8MGHJMWF7xx+GMVSg
+ Jc/5WJlDz9v+xitfZ+ZTv9/zN6w1/l/A1vrvKjlsnkoyilShtaJx8y/xLkYtjfSV9AY8AEXNB/uN
+ ZkiZm3nm1zjzAlQPARO8NUPIvZsqTvJKT++4mizaaDLHJQBuLb7WBNTqyjwhpN3wF5j0Q3s5Yh1W
+ 0/l4e/h7V148ve2ahx0cVlGq5cC/k/jyz7oQP3h68XTlBWj9DfUuG9W5r5c1uS5YIU1YBg4fXg7P
+ Dz1g6pMT9VUsZ/FmJF7f9Gs/jisRHKWXn+Bp8ad+tIueZE47z6cfTSsfKVo8edgmn0bquc/imvMx
+ LiqgX2+/ODH3ZByeTpXl2nc8G78F5/UKOoq0kSoOjzzxZyMMVLQ+RjMR58OgjRsciA/FU1HJ0+1C
+ VjHJKxm3enDNHX40n+wbMOTyU3pgeJU+aDMjzutyhrZnvpb7YhOmuP1gK9i3MJsf8xL/9fjrNC99
+ 0iXTw/G7qg6uk6sA/nri9GO/d8f0vR/E75fH97q0Lt5dgyD6uTYhNFo14WL9iA55HFpJwZXDpQUk
+ bBxaapfk4Qi6GADPQ8TmjSDoBk8irFjw0B/7R37Sv8fIJMzSX4sXmrSz/7QffAp4hhOd7EadBMBt
+ NeHne581feOTfs2oU7vf8zesnenF3HRKLkfGEkWqsFrRNuG9mC5NJpvjJvyS38V1HN0ucY4vix7h
+ GagQFR5kx5H5WO7dVHHSQPjIsMmijS51XMSwGVp8kpM/OFr90m74C0z6gf0fu/TI9Otf/NDpFX/8
+ soPDKspzsgP/Ujf/cvfbnvGg6ZuvOA+/7jk2axKozn29rMh18Xrm+gGHD6077XR5lKg+aYdMyNVA
+ kiWJh+ZFo7iiHzkvP0OWK+Hhr/O0+ImZ9RH6jEzzecrk1bT8KKHCUTv8Zh0YD/dnXsNP8lMDO3wc
+ Wq9fkrhTHYenU2H4t++6Bv989X9Wlgw6irSRKvR54s9GGGQJawReyXfe4/CLD0U5G96wHoZvJr/s
+ yZdOf+6KC1S3jfU+mLxbFXjvrfz9W7dOv3Ej3gPyapu++hI6b9ac0QRu3pSajU2fhwL3NBfKmzn7
+ uvHID+XcB3n4wU5XEYTUDr9AWGGC4sFD7jPq54fJiH8+T6DYIk9ZaqLHHxM1r4e47eZz/JzfXq3u
+ 0VvWPXvD2tp5MYM4mTcgrQYS2gtfSQLjRc9FOzl+8bJpuEjNz9xvX7RyhAfOW7a9ebLJotti6Pxh
+ lwRqyog/utRxCSAHFV90xdBTHX7xmPOFR3z86yd/4wkXT9d9xcOnFxwcVizqvXY9EX+Z+j889ZLp
+ 1fm7v1pfj6091gVP8u11wrrho/rOmtBjUF9Qf7y+6n3T+kp+Wl/Iz5DlBDfF0fuvxU9M7yO/EY34
+ R9zkNaPfnGSpCdFl/MFnLQz4UhHX8JP8VICXeQB26B5+L2t4So8nO77yHU9BDG/FCe4s+5vQJo7Q
+ 54k/G5WSW0DFoyxaLHLnjUXYTO+inKlvWF/7iPOnf4i3qsdeuPnf/NtUk4O5U6sAf+fWD+N3b/3g
+ B2+f8APzY7ODTv2pXZ2dismQx6GVfr1puVdp6M3M7cK+thyKPmhz20/nkYEm7Mc8ibDCh4K2CSbs
+ Z+Dmh4k8hNnisM2wlRchnujxh2HZ6yFucz+YzPpgxP+iW++st54+fdNVb+02J/t86m9Y+NVESBm/
+ qp1FiqLkSO+VbIQS8l74ShJwL1YrYvLmuIFfvLAsnogr47DfWMQMz44UoMKDbPvMx3Lvpjl/S1P+
+ LIs2ulR+7QH3Fl9rAql5a/ml3RPwmf8/PvNh008//aEHh1UV6r59OBdvsn+Tv7/+Cy+dnodfD+31
+ tM9cF6y0JixjXfHhdordXnrATqqvBBMPiWO78EkE6Xevw0p6+Qme6P+yg+z4SKcnPNBPzqcfTTsO
+ wloe9YYYdtbCTnyUlnyUOev6MAAcGPj1w9t/h7Oncimku23It6tp9VY6V7BRHEXXgp/xcp7Fwagi
+ 9RFABuKSxdj0xZt+ZsQWxAuGM+UN65JzDk0veRK+IfzYi/fFb/3cUPKzZupNNx6dvv3dt07vxve5
+ 1KfahdmxKEPI49DK0mi3VmP7cMj+p1l0vPraNGbNQ6TzQCM86aLPQ06U90njgT73GzHLw4lMu+cJ
+ 1HTEJ4QmevwxUfN6iNvcDyYVp/NmAXiYnWQAAEAASURBVKHnW9ZV0wuf9N5udzLPp/aGtbP+Tnhl
+ bZ1UHC48vBisrhwtRdB74ytJ4LVoLDaexZe8OZJzwW+/0QzNLnHmMZ95AaqHoIPsOGbFNZD+Sm+c
+ Zah5lT4PX9ah1UNd0OJrzSJ73pAf/2/VX37cxdO7v/IR01/BuB9+RXHFd5Y+PAP/N5a/3PCf4pcb
+ 4gfm43KfV99h3dxO0f+1voBX3xyvr3rfRD/K07yP9jqsRn8Hj/px9J/7OnjRZ3gyO3AjbuI1DS0e
+ qCgcxTmftcQJKMPhJ/k5Dbvksz/8nPn2d8ngbt6Gp5M1vPa3nzBt3/Xbq62tw96koIji1LiJi0kx
+ WIxKqo/AMxCmWGPTF2/62cB/JrxhPfOh507/4ikPna68+MiGDA+m9kMFbsQ/oPh9H7htesVH7pi2
+ 2Y+8NHpTqn9j2h1NPf6o/Xk4ZP/TrBShz/7PQ4TkvBpO0uCROm/aH+QNHjzkfiNkfpjkTlvOE2hC
+ xydLTdje8cdEzeshbnM/mIw8xRcFwgF2bGd96Ml39y3r7r9hbR/9bhThcCbjICKJOGQii55DBO3i
+ 8ESe2SmnWCQ8O6dcNFZ/N95FGC4cj3FpT7vE2Z/9R83SkUiipoormyqbTMDoAvIM/qL3Q7iTPprM
+ cdGFm67soysoPxp/nea1X3D59Ctf+vCDw2os6b58uhS/6/4f4XeJvRG/OPCL8Lrl9UXf4aP6QpF7
+ 1598X0U7Vl9k+qNvOGM/sR8Swvnqz2zrtm9C7/ggcF+Unx43eaGSlvZ+0l3i8Ou8HY/5iHIcvQ7m
+ c32iQAGcDm9NO3/dwsnfFdJJw1/+2w+fDt31uwjwME9uZbdp3EQYuDzxZyPwSrKPwLMoG/1s4Bef
+ mobF4Zq0+EJOPusxWQ9eQ605/ZJH9jI0MOTyU3qoeZU+aDMjzutyhrZnvquJ/8PvxU+6bPq2J1wy
+ 8Ru9B9fpV4Ff+oM7p+96z63TR27Hv5/IvkEKuendCZzAHyjUf3hwH1oORR/24BGBCrTXoSXHAXMc
+ 7mP642X/za8CW84TKLjw8aShxx8TNW+c73M/mJN/590LtLPeuX197Mjj8fuyPtbtj/d8996wDh39
+ W9iY+EegMukYsSlVlBzpcaya/beicXMv8ZUk0Mmf4yb8kl982Qxyj6rzsJj5dbzmLUeKL2qquO7r
+ NywW/QWPuWi67tlXTN9x5YMODiutwOl5+5rLz53e8kWXTd/9+POnC/j7t6rv85BAXmpDHmbZ97Fv
+ rNh1WLESRROnXfLudVhJH+3u/nb/lx0IR9/rSQWnvvCKT9M6NKWoQ41hjvyKl3rus7jmfJkH7IQD
+ KBIjbmvaOn9ra/s70/ZkxuHpROiXv/XIdPjIR2FwOZdi9gbDIPJwaMHPKEOvNwwWCbKS40g+/JmN
+ Ta8kw74Xp/P7zSWagnwL/C5/5dAlGHAX1/YicmQCmFdNE7IXNXBZBoqZEXG67PApl+H7VE996MTx
+ 4DqzKvAx/DNkf/e9t07//vfviMTY0biisb2Zs++5d0uhDaBtJHgeIrLuBJrY69DyPvGZoG5r+4yG
+ 9t/8KrDlfLgLPO2A8J18+HDcnB7zAsRt7idxYefAhBTPev3fd+6884rpLz/taOfY6/nk37AOHfqT
+ 8OXDSjG0YLEp5TxHeotkynFPLnE5Bp+3tNZOfFl0HlIn4pc+itntMg7b98WBUwMVosKDTFwtSsi9
+ m6w3jvaVJh9CxoAlZn3G4n4G/nWaa59++fSGr3jkwWGlip95N/4LRD/6uRdP/+/Vl+JfzfbWOvm+
+ inaswyHrM++jvQ6r0d/Bo34c/Tf6lm3pDqWHMZ9+7Ff9S1iLp/ZF2FnLvheQ4oKPMmcRB3nkVhOB
+ 48Tqj0znXPAniTqZ6+QPrGn1DXQlnxwjyHyTmY303JKgmPIMh2w6z5JfMnkSl2PjE7foHRn5ut3c
+ r+O2Hkb1EOFBtn3EFbKAiiP1HrUGdMur9EEblToP/zrN38L3qa7/E4+env9HL9KsDQ7uZ2oFnoYf
+ g3jjFz1k+iH8WuoH4+99uk+8ad3/0T/ZQOqz3jduJ9dn9DVlHRqwy32TNRy8wdP3De1qX0DQKeLG
+ HfPpx4z0w/DoUXeJw2/6N07AwLX9R2uZwy75PBHxkA//AtK0/Q0yPombozkR8F9ef9F0ztHfX22t
+ LmJoNJqNLEYcJjmqKBHcLnrOs2iLsYp3qvzBVzydn0FE4EOfjqIMM70Xx2GWQomLlnR4yDRI73xq
+ mJ73qIum7/+8B0+PufDgxxRUn7PwdhN+DOIfvv/W6Uc+fCv/GXftE/dh7CA1UGwH1Mdv5nqIahlX
+ b0I8rPBBuV/ed40nGzNwZR9u1bh0A726W7jYx5zXDhdAbkQXeE6UnXBg2OXH8ZXdLr6RBzA3r3cO
+ P/Jk/rGKk3vDOm/7z+w6rLhrcXHTMtjZaIX0dTsBvg4RGKimPAzwPONNPyQNPj7yEg4WxRNxJc48
+ xpkXRvUQdJBtn/kMXvGUPuYly70JQr7qkiPTr1zziOmnv/CPHBxWUZ6zdXgQfwziqoum38Qb1xde
+ dlj9xb7Lfh19Fe2o48Pt5JrpOAk85nlYRZ/3mo7+zrZu+xJA93Xw8hQpP22fiTe90p7P9G+77tfx
+ U2s/AgnX+WxHouO9YWGrXjxtbf/p5DjeeHIH1rR+0ThRnQJlXhp5uFDO0Qrp63YC/CZ+lmrGexx+
+ 4VCW4ol4MKEQzGM+82K6Hjhv2faZT/jPRdHasPgZV9jRAwgefO6h6Yfwg59v+6pHT19y+cGvKGZZ
+ Di5X4IkXHZ5ef/WDp9f88UumKy44pD5VA6pP3UfqK58S2bYwdmO6vyl586ec9R39nW0934/ua/vR
+ IVl+Wj9r/5hRb1gMqHDkjX3B2dxX1GNf5jX8MO7wRzvhakL25hMMf8dw56S+LByebLf7/mPvuHw6
+ On0cwJVL5xR8okewKjoQOe5mcVLQ66RfjsAzkCW/kmcxkjfHDfz+DBKLSb5uF3LyyU855EOF5/gQ
+ ie1DoWIzDvPWZ5oMOAg+9DV/dHr4+fV3Nzh7Vl//7HfvnF7z8aPT33z0OdPXf8aZ+28h3t1F/oWP
+ 3zF909tvik2f/R1tDjIfSpbNHY3G/i/9ODyMoYINWoP4c7/JDnp2uw8bPcl0eciwz3kZp6eS+6El
+ fzOcYPbb5xX3eAnodo5Hvo6uj01XTN/8Of/dLJvvJ37DunPnz4FUv5UhU7STKJ6KlMG0IirI5jRk
+ FSHflHKM5Jb85SdxOZJ2wZ+LMCs+8ImzXy+CecFRDwETPA495WV874I5f9Hr4ZIjB7+imEvznz55
+ bHrKb948vfSDd04fwL/I/Fffc8f0rLfePL395m2qz/prHCLRn2PQYcUCjfbmPhv7yofZkLOYo7+z
+ rXl4DdzoW9Kp8WU65tOPGeuNKE4vb4c5H5HGkc/XnC/zgB155JZ+OJ/7zHZQ4cemVs+3tPf9xAfW
+ 1upFSoXJgydT1RsI5TgUZiP9qSh8iCvkGY5Bt/klf/rl6s3sSLngN49x3S5xtne81oOjHoIOMnFq
+ CvKHHA8l2yz18/xCOiuHD+GnvZ/3jlun51932/Rh/iIpXLlu77x1PX3F226ZvvWG26eP41+vOduv
+ 7Ff1p/os2lE7LPpRRfKOMx7z+Kj+bEV0nW03+nPD/hKdEGZXv8c6gTm3Ff2w/+lRd4lzPmupEDBw
+ cU4EkQfYJV/NZx4yU+B4K3pRSHsOxz+wrr3+jyHoz2PIdXLiOWWycp5ZzkYreB9XJDXDpR1Qm/jL
+ T+JyJGsrkkUvQvFEXImz3+bH8OIRnWrv4na8MhZft2feZd4eGM3Zdd2Cf9HhJfgHS5/xllumX7/R
+ b1GqH8rgOro/WPKfwT+x9bQ33zL9U/yCvLP53HJ9ooHGoMOI3TPae/S15nmo4CPryzlerrPtQDer
+ e+o9Dwn7CAhOC1d48WracVBROLb78Jv+7603LATCsD5vuvadVzmCzffjH1jbR7+BJ7dKlmOkUCc+
+ k5ezNtIX5/sVsj8TMLg5vvzAZuav49IPeRf8jse8ac+4Emd/FDOfckQ2wwSnPuMzXhEpjm7P+MOu
+ CER11txY55/GAfRUfPn3zz9y14T/g1+X1yPq19aN87fhgPtefLn49DffPP2H/3FSP+BcvGfKg+sT
+ DTQGHQrMcbQ391H0I+d5aDSZWF7i6zzq12YHmcsjXu4L8PCinefTj6YdBxWFYxRzPmu5DwSU4Zwv
+ /NGOPHbUcOSTmED8TNbquN983/vA8i/n+7M8SZVKjuBPma58siNpBs3kM/gcI55MaoZreM6Lt/GX
+ n8TlaMfJrNF+7b/bzf06XuvLUdhbdhyZT+SnYjO/bp+yzKmIh7NjePvNx6Yvw/el/iq+xPsEz5xF
+ /l6PqF9bt77OH8VfZfnG62+bvua3bpn4j0CcTVf2q+qmvop9pV3dy8m+GvtKh0aTs2bi6zxcj011
+ F512gExrPbR+9GNG+mG/I5LAUWxxBNA4AQM3zgtZyxx2yZd2GM0ns0p4Z71+AYIYhKHOYe8D6xXX
+ fSkMH9tPTPrOVHWiU0ZRmOVsJDvn+xXyDJd2gjOpOb/kk+R3PI6j22Uc9ut4rYezeohwIRPH4na8
+ gIoj9R5pH2m1h570mff8P/Bv+H3bDbdNX/62W6d33Ix/z48psgmrEM7Z65F1inpC5fpipBll/HnD
+ TdvTM996y/Sd77tj+sP+mgbdmXq5PlG3MaAe3jejnK5U1RP66s9WHOk7j/p1Q91Fl5Vv6yGHxJu0
+ 3ohaPN1vj0frH7FwXuxB5AG85LFCSOOGv3S8tVo9fnrVO69uqc0e9z6wVseeS5J+Art00WxsUlw+
+ 2Y1LfCg01O0E+PIDA9eUSZ48v+KARfHkJpr5NZ95y5FCFAwK22c+4V/FRr6lz7g4yrw9hHyGDTxH
+ /tXv3jE95Y2fnn7i9++KdUfazJNdWYVw4l4PTrNuUU+oan1oRjlG/Da36RW/d+f0tN/8tEb+Q6dn
+ 8uX6RN3GgHo48VFOV6jqCb3fTOYFcp29DKrrXnUXXVa+rYcccp1cdcVBWIun++3xaP1jsThf/mkt
+ PvCSxwohjRv+0nHkv+eXhcc5sFbXkKSfmMo1UqgTNppVcuAVEef7FfIM1/DlBzbywybH8174XiS6
+ EQ4WxRNxJc48xpm3HNHcMChsbx7VWHFLocDm/GFXBKI6427/5VNHpy94003Td//O7dOt+T/4VF/k
+ z2zZlarTSF31hui6Rz1DVv1pRjnHsOc/C/id7719+qK33Dy9of8bgcCdSZfr0/sq6+F9M8rpClU9
+ UTFu6pSzJq6zl0F11foMHPWehwXXyysX/R7rJF4z1htR4RjfnI9I48jna/jpecCOPBWA/ZmvDPUg
+ 3A7Onj2uzQfWtb91Kcg/l03YT0yXzqnWCRvNKjnw8qWiNK8hz3ANX35gIj/A55hx1EjaBb/jmcdL
+ fOLsl2LyliOyGSa4i9vxioj+Sm8eyzIvPyGdEcMHb9+env+OW6bn4ntM78fPU9V6MLusB59bnSny
+ 8npknbwuOS8eCLMx1tN1X+l7Wl/z9lunb7ru1unD+HGJM+1yfdRQvb10KLhOmbEPl6onDw18pFyo
+ XA+3qfWb9lcdGnzw+ox1Ja8Z9aZDhU4Z4hjm8Jv+jRNQhpwffLajZfGFA+OGv3QcuM+ZXnH9gx3J
+ /L75wNqersFJKbZ+YirXSIHzvDQyyGjanFcTCxG3E+Bpt+SX3HnTjx13dvvnYiZP2GUcjs/xmhfm
+ 9cB5y7bPfCI/AsXX7WEQ0wpEBLOQTluB/xfvH7z/9unq37hp+r8/cZcKM6srM8t68JlNuMhf9RaM
+ dYp6hqyy04xyjmHvdcr6r6f/8Ilj0zPe/Gn9X0XGdaZcrk/UbQyoByvSy+kKVT1j86ec9XDdalmw
+ HHvUXXRZeeJj36n+rLsZFYfK7QnRYcXSb42Ml+sf15wv84CdcACFA+OGv5o3cms6tP0lydnHzQfW
+ tHWNTlCQ9xNTudIn/kifY+DoNOfxAG27Qu68HV9+YJL8ORbuOPz2O49XRZj5ddzmLUcKUjAoHEfk
+ EbIiIqD0kb/kyDH8hHTaDq/9/Tunp/zGjdMP4h9a4PetnNairswu68FnrMtyvb0eWafRF7vWmeai
+ c7/ILtc5Rv681g9+6PbpqW+6WT/HtegsRnDaXa5P1G0MOBKyDpkS5VY/HhpNLlSuB+BVz6wjQFV3
+ 0Qkh0zGffsxYb0Qtnu7X8YOXeq5/XHO+bAvELxxAsU+MY15lqIeGuyY0s2GPA2vnGp2gYOsnplNi
+ kIzRQXYcvec8HmaOUt4Lz/klf/lJ3hzJvOC333m8qsYsTsdtXnDUQ9BBdhyRR8gCkqf0xlmONBfx
+ xOxpM7wLP6bw5b954/Qt190yfRw/buB6Rl2QaK1P5pn1YIZYl83rkXUafVE8NMMfdonG4JXfXOcc
+ gSH/f8f/ofzWd982fRm+v8UfqzidL9c36jYGHUaRbqTnChnPennzp5w1cN1UplHPVj/qXWdYcL1U
+ eeJzPv2Ysd6ICsd1mq8jkcaRz9ecz/HQsvjaOpuvDPVQuGnz97F2H1j8/tVq63N1gjIZJg2qGvGc
+ Mj10HJtKshW8j0tF2hu/ib/8JG+OZA2+dGC/83i1iWZ+Wx65ZqUHExw6jsgj5FA0febRwljEk3Ht
+ 9/ETOAT+99++ZfriN944ve3TPARQmMo781vUlUkxX+L4jHXZvB6cJm70xa51prnoxDTHN7vO/45b
+ tqcvx6H113B4/QHiPx2v7FflFeVTHVzRli7r0uoXm9/2I3PXuZZlXkfAqu6isydaj/n0Y85602nx
+ 8DBJvzVSz/WPa87neBS/cADVfvMhVtsm5ws3bfw+1u4Da/vQNSDFQYkgQNJPTKeEafiVPsfAJV6x
+ tyS63Hk7vvwAnPw5Fi79kHDB73jm8dIucfbruM1bjshmmOBYFHx0vCJSPbo961P07UF0+/52DLH/
+ 6w/fNj3lv/7h9KqP3o74ETILw4eogyTORz2odp31YJzUMuBTXYlzHaOe0FIWD55nY6znDH+c9UaH
+ Tj+Fn7J/Cr5M/CH8NZ/T7dxyfaJuY0D1VXD3o6rphTGeq9P6s6od69J51K8b6i66rHxbD9WfeJO2
+ Nx1HgfnaF5jp8WifRSycF3sQeQAv87Ii+DKPMvR84PAvreJs2v19rN0H1rR9DaPWCRqjc6TT1mQ8
+ DCjHoTAbrZC+bifA18kMg5m/k+SXf1gWT9hlMR2f4zV/OVKICg8K22f+kZ+KiHxLn3lzjAzrIeR9
+ PPzXT901Xf1fPzm9+IZbp5uO8oc/mS8CZmHYVZGnJOW3qGsqiOMzu3KRv9eD0+SLegLq+tqO7miv
+ Mexn+Ga35E/51mM709/H32O8+jdvml6vH7kH4WlwuT5RtzGgHqpopodMXCHjKXnzp5ypum7Qs9y0
+ 2qvuohNCpsQVHk+xDPIjhfwn73wdSaB4uf5xzflsx4iMo0Hml3mUoR4GDuLW+prQ1rD7wFqNn78i
+ eT8xXToGyR50kBoDl3ixtyS6vBe+/ACc/DkWb/oh4YLf8czjVXFmcTpu85YjspkOCsdhHgaSvAKU
+ PuYlyzwI4nmfDr+LH1P487910/TVb75x+p3b/KXUyBdBIx8kotF5Z1qLugoWOD6zCTeuB6eJi3qS
+ HTLdsHtm42ydsv7DbsmfcvLzt0O84J34EYy3nx5/zcf1jbqNAXVhZXo5XSnjWTdv/pQFFp51tp3q
+ ulfdRSeETMlTeDzFMjgOKlo8Oiz7OklLvwJu4Ms8wEseO2q44S8dDxxg6xMdWIufvyJJPzGVq4Ik
+ l4PUGLjEK6KWRJf3wpefxu/aws9J8DueebwqwixOx23ecjTCY02BV1PQLuR4KHnERbzM20PI+2i4
+ HT8O8LL33To99dc/Of0ifnlcxY8YR74QqFDXe16S8lvUNRWsD5/ZxFUITnR71tH2OS//EGZj2Gsd
+ E5+jDXkf1x54/pDrF+HHIF78vtunG/l17z69sl9Vtyif6+GYRzkpt/rxUGlypue6eRnEQ4JWP69z
+ LBPm8STTMZ9+zFhvOoVjFC2OrD/14gs7zJd/epEb2AlXE/O+o+mMjzInt3Z9H2v+hrX4+SuS9BPY
+ KZkrT3iNgUs8XfUkurwXvvwALD8stmiQ/knwO555vCpCFNN+GVbylqMRHhxabx7VWPZSKLBhz7ha
+ muFHZPvo9rP4N/Ke8l8+Mf2j37kFv85lUR/EOfKFwIKz3aIOkth9mJjlnQripO6F4IR5a8z1i3m6
+ od1snK1T1j9GE/E+ruPg+eNaP/JR/HjGG2+arsVf99mPP77Fekah+4C6qKKod6ZqnPGsmzd/yoWi
+ QSyD6ip51G+sH91m5b1Ohdc6m1FxUNHiub/fsFar9a7vY80PrGm6xic1isKkkHQ/gV06pyC9ch+4
+ xCtlFUVPvoXceTu+/AAtP8DnWLiIZxO/45nHSzsvDgfG6dG85WjQlT7zDzsbLuyTT+blJ6QHfHj3
+ zUen/+mNn5xe9Fs3Tr/XfvFU1bmthz9zImQWRl2feWdai7oKFvnzudWZIi+vR4xt3co/MFqHHFs8
+ J7Pe83XN9Yox/N+IHyT76++5bXrmm2+a3rTP/pqP66OGi77KeqCuil8Dn/Cn5cVDpcmFqv4OHsnN
+ DrLqLbqsvNfH8+nHjPVGJP/EkXfOR6RxZPBV60sDXB5gR54KIP2Sz3b5MHCcl8E1gdCw68DSyQ2W
+ PpKzTmg8p0yGjqNTyVbwPi42Na698Jv4y0/y5mgi8eXNfu2/28GhIPbb8lAtoCo9nmHoODL/iNeK
+ po954SMC8dDzA3+95Q/xTfX/75PTm/7wqJosm4GRVZ0rbzfTCB+FiToYz/uirqkgTupeCE5k3bJO
+ oy/KPzCs1lgGMSm+6qPjrPdYN8ab67XZzw34C5DPx/e39sulTZ0F5xjlcz2yDhkt5ZYXKsZNrX5O
+ CEbJnUe8A1d1F11W3naSIh4N5OPKUKEVIo7inM9aKgSkqDgGn+0Uf/KFA8dDPpklcPjVPJj4PfV2
+ jQOL37+aVrOfvyJbPzHJoWA4RpAaA5d48bckurwXvvwALD9swvRzEvyOZx6vqjGL03GbtxyN8KBw
+ HOZRjWUvhQKrODkf0zMCCQ/s7SZ878b14Hpp0SugWfyYHflCYGGE97wkLjgUSztMOH+peyE40e3t
+ v+KBHd2QdjaqzmF3Eust/+lnA77ibX7wuC8ubVblG3Ubg9cLUUY5+IQ/0Y+SWL8hY0qX6tt5SJB1
+ AaLqIbqsfJ9PP8EXfWD/xDGK4bfWUzjyhR2AYo8EPMAu+Wo+8yhDPQwcRSU0+z7WOLCOHf5jRLCY
+ jK6PzpFOReGROBvswodCQ91OgNcikg9/Zv42xCPO4Et+xQvL4gk7Lhov5+PR/JisB85btn3mH3YE
+ iq/bwyCmyZ9+9LwPbq4HwvKiV0Sz+ihsN1PmTwvnlXWj6aKunMp6SN0LwQmq0558Uc+YV9nxPBs3
+ 4ZvdrvqeAF95hh8M++pyfaJuY/B6IdJIj0/40+oHmZs665tJSe48JGj1q3qILivvdZIkh+Q14355
+ w8K/h7o1rY89MfMcB9bWscdw0ic1ioJkGf04mZEM9fjjnPm0GR8KDXUjH67Om/w5v+QvPxFHx4NI
+ fHkTLxeTi9T8JM5+7d96gOqB85Ztn/lHvAQSoKHzhx2DWMTDqQfycj0QFlcsuxABzepTMvOFwHoI
+ b5wkzkOxtJMB6yF11IfPcZV/1S3qCV3x4Jnu0u1GfK47OZf1DVl2ictR8Hm/kmI/Xc436jYG1EMV
+ bem6QlUf6PubTubkOrhMqutedRddVr6th+oZfQDSetNp8XS/PZ7j9xcjBC95HJhCpr35JNb6DlzY
+ Ma6tVf2e93FgrTzpkxpkbHKSsgnoMkc8pyzKhks853sSXe68Hb+Jv/xEHB2/5BcvIiueiCtx9tvy
+ yDUjLsOFQ9tn/paVsfhSH/PCy5wT8bA/BtcDYbFLuOhxzeqDuZEvBMGIj/ykp+GirpzKekjdC8GJ
+ bm//FQ/s6IbVmo1RP+FOYr2z3nvhndfww5j20+V6RN3G4PVCoKOdXCnjmY83f8qZk+TOo/WJPhZf
+ 7mMI6gfy0k/Opx9NOw4uEJ50x8DDJP3WSP1x+4vWsBMOj22dzUc9rpwvHCfD32pnw4G1vdakT2ok
+ wSCYDJuHpjmSO2QMM1ziOQ+FhrqF3Hk7fhN/+Yk4On7JL15EVjz0xyLM/LY8GJ4dKETBBHdxHWfk
+ R6D4uj35PT0IKtsH/MH1QHz4UB0ioll9MGeZdYPAegjveUmch2JpV/WQuheCE93e/iseOKKbdFej
+ Agi7k1hvB7w3vuKFL6XFoPbR5XpE3cbg9UKcUQ4+4Y/rz/C1yZvMOV7i6zwkyDqGnnUQL/eFeG3n
+ +fQDlbS095PuElscuV7kER9RS77wx3iFE6DhyCcxgQ3H+fC3s+kNa2v1GEHoHCw+sT2Ss05iPKe8
+ F57zPYkud97yIziTYoiDv/ws4ul8eqZdLELFGXlkHPbb8hiOBh0c2j7zH7ziKX3MS44I2qJlTA/k
+ 6HogTla0uiLzy7xSZr6IlgUXvus5N+8DzlQ9+Ez+Rf7lP9ahy3ST7moMe+FOYr3T3154znc/DHM/
+ Xa5H1G0MXi8EOsrpChnPujGv6M+WkOtgO+W9V91Fl5UhPuokh9EH9I8PFZAjZYnDb4/n+P1Fa9gl
+ n/yk3+EvEx64sCN+tX4MJV7+kvC160PTzvQ4TvikRhLRhBwZco14TnkvPOd7El3uvAxSsuDhB8/J
+ n2PhGn7Jbx7zdbvE2W/LI9eMecq/HROnplD+UQ9G1OTB39IMHpHtg1vVlV0XTcKwnF/mlTLrBiUT
+ E77rObeoq2DkFdr8i/zLv+p2nHUWe/fneGV/nPWer2vwN3zlGfwMeT9drk/vK5dfbyIIdJTTC1P1
+ jM2fcuYkedBpnWvfiC/3MQT1A3lddy57xpN+642I/SA945uvo+ZP2F9EwU64INKQ+4x6XOF44DhZ
+ /j5z4hmFywfWrW+/At/YukgQJgNjn9geVTI2gyig5qikY1zgyeOi6Mm3E+DrpAc6+XNcxiPC4EsP
+ jmcer4ow8+t4zVuOBh0UjiPzj/xUbNYl9TEvOSJYxJNxPVBjrQ8WPZuBsVSdW13YJBJZGOEjP+E5
+ t6grp2jA/KXuheBEt7f/igd2dEO72djiOZn1dsDhZ0P/VZ7hB8O+ulyPqNsYUBdVNNNDzK5U1Q+y
+ 18u4TEr6zqP1iT4mS9ZddFn5Pp9+zFhvOi2e7rfHc/z+Ih/iII/cOm7HE31HSK5/4cJO86tLJp5R
+ uHxgbW9dlU59UsfJxiTZDADWiOeUSbAJz/nk03OT98Jv4i8/EQeTkn3jS37Pz+NVERC/4VzNlkeu
+ WemBKn3mH/lZ0fSZd0szeDKeB3rMOvkzFivpq+pcebuZJAo26kQLwxZ1TQXrxWc21SL/8s/5tm7l
+ n2b4M5ZBTF7fxOdIHwv+lOUncTkKHn2LZ6VFjn10uT5RtzGgHlmHDNYVqnpC3990CqU6u0yq6151
+ F50QMq31UH25zmZ03/B5xNP99ni0/jbT+pV/WsscvOSxQkj7Hf7S8cARNvpuOjY9hjM+sNb4LjwW
+ m5dPahSFMrz5JPQ8fctn4nIMXOIxTQMNdTsOv+FMas4vucVxPH7FC4Yer4ow89vyGIkoRMFYUzyw
+ aOILWYumOFLvkQEHfXuojB/QB9cDYXHFsgtZX+XnkQGOfCGw4MJ3PecWdRWMvEKbvwpBZbe3/4on
+ /QNDd2MZ+BR290E/iXwf3VyPaKAxeL1UhwzWFar6AVH9mRDhWWfWL+qqh+jj0KveotOTrL3+uV7E
+ m7TeiLRCyTvnI/LE/UUU7MhTAdif86AeVzgeOE7aH83yRxt8YK2mx2RT6+SEcR+dI52GT46xCTqO
+ TnM++ehL1wnwtFvyp9/iPQ6//dp/t8s4HKfjth5R1QPnLTuOzD/zBJAADRFnyU4v/YT0gA+uB8Jm
+ l6BueTm/zMsjmyTzp0XmSRvNS9/yTgXrwWfyG0hJV/lXnUZflH+gGBXtNYa97HKdcyTjgj/lvfBL
+ P6TYT5fidqGj3lkPVTTTQ8iukPGUuA6jnpmT6+AyVT1b/aoeosvKE9/XNfpAXgEkTP6Td/jt8Ry/
+ v8gBO/I4ME6E3+EvEx44ouzPYfinGOINC//bkE1HCEcm0UbnSKfhM3E5LvCYJpGGuh2H3/Dd/Ol3
+ GY84F/z+DOS4u13G4Xycn/VgqYcIFzJxagryhxwPJdss9ZHhIp6YfcAG1wP5sEvaZnd+zpPBjXwh
+ MDHhu55zi7oKFvlLrULxqa7yrzpGP0Fb/vGsOuYY9ZPdfdBPFdg+eXB9om5j8HohxtFOqHPUn6Hr
+ sGoy53i5brZTXfequ+iy8rYrvHiDL/qAHs3PKObrqHnhyOCr1rfWk/OwS76az31WhnoYuLADXuwr
+ fNsKV7xh4fSKpvZJDRBlgtk8ANaI55RJ0HGJ5zwUGup2HH7Dww+E5M+xeCMecS74FQcsK07qWZyZ
+ 35YHw7ODQSe4i+u8Ij8Cxdftye/pQaCnfXFzPRAfl5t1iGtWH8xZZt0gCJZ5cSLzW9Q1Fcyfz63O
+ FHmV/1iHLtNNuqtxtk72V+tuQt7HdQJ85QmLkf0wf6CfXA8WEBUYg9cLwUV6fMKfqIek1p+Q8xJf
+ 5xFvs4PMOohX/UBeyjmffjTtOGgg/7bTYSkC21nL+AWU4ZzPdoqfPBWA7c0nswQOvwpv9N3Eb1vh
+ 2pr+5fX8v4OPSqebPsMpFQSlEeCUSbAJz/nk03OT98LXyQxs8ufIKs/sGl/ySw/L4slNFMW0veM1
+ bzkShWBQ2D79RX6MSHypj/mYHgQZzQM/uh6Ik10STcaoZvUpmflCYGGEj/yk59yirpzKekjdC8GJ
+ bm//FQ/s6Cbd1agAwu4k1tsB742vPOFLaTGofXS5HlG3MXi9EGeUg0/4E/0oifUbMqZ0ia/zaH0G
+ ruohOlaED66fJDkkXtOOg4rCMYo5n7UwOG5/EQU78tgRJ+An85DICc8XjqL9OYzVo/ijDVvTkbse
+ D/ShdOqTGmQMgqRsHprmiOeURdlwied88um5yZ234zfxl5+Io+OX/OJFZMUTcSXOflseKt6Ik3DV
+ Snlm/saHouljHgHKruXHx/1wuR6Ij10SzcC4ZvUpmflCYMGFN04S55d1TQXzl7oXghPd3v4rHtXX
+ dnSXbru+1jnX3YS8jysKL7vE5QhU5YlnpTUs98WT8426jQH1UEVHX0WFqj6Q+5tOJuM6MG/nazn6
+ GCDKqrcKrieZjnmvRJTVcRDW4ul+xS8tHQq4gc/xqH/IUwFkPNF3ckP/9JY4So7//2fvXYB2za6y
+ wPc/nU5CEjAoXoay8IIZCAm5QBDRGhFrlKmxykJriDrKCASJiKMU4zhT5eCoU17GKRwZLEdrnBKm
+ RDOiI1rlBS1jB+SWEGIIBJAAIUAg5Nb3TtLd55/nsp611/ue7z//6aS7z59071Pn23vt9axnrfXs
+ /b3f13+fPl1l3LG9/y0vwP+b4tqvoitJ/aT2k43dryczgkXhFoRTGOgK1/Pg41KjmjrFT3/nwVp5
+ ePlqv3mTxwF87eF69vUybt/XyCPxEN51YS0486Z/48sx/LW/6JunC7rNiz4f3hLqUKN17r7TLwCC
+ EZ++09ZBV3Ixnjiuh840OTq/cKVn7TMN43bzqOdWznudG+vIeV2cB+mu1Mh9bR0tJ3SRomkPNVup
+ 1hN238/Rkfw6t9L1It1FF+V9TrKkP/UzaX8jGvXMvLOem98v8oGXPE6kBIw3n/Ml8cJVnHB1n+44
+ /7X4Gdb1ZyukLvXuyUwwLwMAPWMdW5R1WXdxdvB1jZvwE3SKv/NUHWxKeRywuCuelTVP1YUN4Vzf
+ yJMzaz9g1BQ2RZv4cgy/ccZXGcVT1m2fVD+q4KXIZWBR7q/qb5v9wqDgwk8/9w66CkZeoc1/6L/z
+ c3+cW+cXq+P7nLGnuOAzJx/njMp3Ef6YJ2FXZVbduUC6d5LT54Uil5w+GOOp17ifoxnr4LjWc+jX
+ eohOCEWv/eQxqe8N19wPb70vZNc+/ciTsedzHDtrvnFufp9VZPbFxwTcP96768/GA+uaH1iV1E9q
+ iEIbJH4SsiYmJYW55Kc9cMFjmw5N/VL2RfhT/Mp3i/yuZ1+vTn2Xd/SxGlGJgkl7i+s6qz+JSD1m
+ fOzqsPJ0v7d5YT1QL09s3f51jkMXvQmih/DpmzMbOejKLTqoh9wShqsenV+4uk/wch9oxe1mJ5K/
+ 71HdP5GWfySobdZR/JnJf8jTcVdkYX1KtzVBFykqeV0q7aEf/D4v49KO+CYP9Tqlh+ii/NBJ+jJP
+ sjKea2+IbtZRwMvvFznASx6lDV/6oB9jx0ebm+7bYdy4hgfW9XP8TaP0qTpMmBE8Z0L7yYl1bIfd
+ iOd++LQe9uTtPIKzKZa4+DvPoR7SHfnFC4aus/oIznlHHyvRokNCx6d/26pIfPHXvvAK50YtrsZk
+ PVAWFa3LwMp2+rTNfmFQcOGNk6W2DrrGwf65Jv+h/84v3UpPQDs/w2hnrnjF3cJ5J99F+GMepLlS
+ Q3VTAenTE/SQomkPNVsh42n5zR87TcledNL55PtLdFGeeet9J/3rHigrgISNevjQSd6e6b/p/SIH
+ 4oQjHQtI3pWv9xtHlPOpWsbhWXUNe35gVVI/qf1kI0l/UvESicIt+BOCtVql3axc6pYrj5vwE9B5
+ sFae5LtFftezr1ci7PKOPFJBiZnemiOx60j/tlWR6oi/9oVXODdqcTUm64Gy8CuXgZW1zkMXvQmi
+ h/DVn/CMOujKregh9xSCGzPe+bsexOl8gdnNo57cu55NyNc1LsF3n4hgnqs2rEfptiafF4qt9rjC
+ 77qPsqjfsrGlIb7JQ4J6/xLQeoiOinAx95NH266DsMaxipXX9dPrPI468tGmB3HCkW7mJV9FZr9x
+ FYd9VUs/nlXrgTWefGTxE9szOftJzJxli5JxBzz3KdZu3ITfcDbF1hZ/57kFfj/x9/VKjV3e0cdK
+ pDIFQ0L3mf5tqyIC2l/7shXOjVpcjcl6oCwqysOu0ec4dPEnJwCCrT4ZYthB1zjYP9fkP/Tf+bmf
+ 8xPf4ZwZXvuYpH/jR9yRP7byBJe5eMSLNeerNqxP6bYmnxeKXXJS4bqP3Mcvn5eU77asg+PU90W6
+ i04IxTKu8cpjSt8brp1HdLOOKvDy+0UO1E8eJ+KGzrnvnTe83ziah3unB9b29DcsSVNn6E8iHCIu
+ v7Tj7EXbPvP4GY0hnJdX4fXWPwF9mdKmbhXvVvXjyXp03+m37qDeXYf+Vzx1Kj0lE/Mpy37ufAM/
+ 4m7Q9xI88888V+FMZg3WhwKue+R6YWNUe1zh99Cv3vzRl1gO2Yuu7BEXPUQXZRwnSwmJLz7mpUP5
+ iaO557OXDgEV2LoXkSfEha/3x70TkRMvHDedb9THH7qfP/1vCSmNVOFscdcnVjt0drtPpHlW49Ao
+ 9e0eqh9FXP4JmH4B1p1Bv+qLfZcu2Nj1HQdxXOs2G0+To/NTF/inzTRE7+bST7jgM5uQr2tcgu96
+ EcE8V21YDwpIfXryeaHYao8r/B76wX7KfsPazvDAOj97+t8S8krUu8efEH6y8674k0wL3Z3dJ0ht
+ I7wItLoSL66bV73eDVXVrn6VPT7p+N4QPn2nLetBd3glGPtniN50WtHSCE4z/NMWD1C7WQdQ/MFn
+ JmP5RT7sHf/Ac3/yd9wVWVgPCsjz6cnnhRpXu9R16AfE/KaTdqzD4BHviIseoosyxJdOSki8Gfub
+ jk+4ytzzEXn5/SIKceRRWidw3pUviReu4mZ95/wZVv6REIctCGeCxuwembRycr4Jvog09csl+OMn
+ Ytujjq6LpMUXftfjulOvVZ59uW77EdmLooPtvOnftoCqI/7aF74qONSTum7XbD1QJ29JbiGK2ena
+ NvuFQT2Er/7k595BV25FD7mnENyY8eM+1T7TJF3PpZ/qPtw/8rlArfxyCb77BFptjdCrsPT5lG5r
+ 8nmhwGqPK/yu+yjLb37HY6OGdXOc9NX5jDjY3keA7gN5ic9+8mjbdTBA+cO757MXcTe9X0Qhjjxd
+ QPKSj36MWiwcN52v+8Gzav3QvZL6SY0maLMZzGolM7lJdRM8U80mpj15w2945YER/syNq3omn9Z4
+ cT37eiXCrk7jzNuJFh0c7jf9L1710/7al10VVJ6ybvvU58Nb0rci/aWv2OwXJVMY4aefewddBSOv
+ 0OY/9N/5uT/Ozfo6jumS9iR+xLlAJq5R+RQXXGZAjnkSdlVm91sXaE3Qg4qw/lRqhVof+J+y37B2
+ P3THYXP4Se0nG1WbT+C+XMFlLlzw5Jlvkmmf4jccbwqG4bdmXj7arOsW+IXjYR7iUod5zGfeToRF
+ lQuH49N/5WclqiP+2q/tRaDVlXixHqhTTxV27LHTB1urXxiCUe/qT37GHXTlVvSQewrBjRk/zq/2
+ mabPGWvZ5OO+eKN/zXbI3y+X4LtPBJD/qg31aaFL79JBylhe12yljKdufF8MXaox6+a41jPvG2Ba
+ D9HlBOZ+8iQrbAnH/fCuvLOe8XQdeVYcidY3p+ynD+fTfWIe/FJewZyv+9E3LPxzoULQHMepTyzG
+ Hj+x+onPOFyeXZyJ+LrGTfgJOsWfvLfC73pcx4wDsWpwfSOPVFDi8mNCoOtIP7bLMfy1L7zCuVGL
+ qzFZD5TF0683Nytzf1V/2+wXBoUTfvq5d9BVMPIKbf5D/52f+7kfZIfNNEnXc8UrLvjMycc54xL8
+ MU/CrspsfeoCrcnnhSKrPa7wu+6jLL/5HY+NGtbNcdKXBEO/1kN0OQHi6zyUsO4BOPtnScof3lFH
+ FXj5/WKBiCOPC1PFzrvypeGFq7h9fU//W8K8mXG2PCWYFnd9YrVj+I0zXvpzoxZXY7r1T8D06/51
+ q0oHduK2bviks6PuoC7bof/Oz31euvJzRtiND63hb/yIu0HfS/DHPOzlKg3rIaHrXmmCLr5H1R5K
+ 9v1r/WDzTR07PcledPYP/eiX7qLLCfAYs588ZuxvOqOemTf5jSNfxe34fE3YWfNVY87LPjpQi4Wj
+ 6T5dN4D4F4T4ofvT/5ZQ0tQZric/NqAR7Vq0raPlvvyMxhDOy6vw6rp55KyTFXu4v/TlmZckbTJi
+ 9U0/4+jnpVtxcrB/uacQ3Fg41cFLXPo0DzDiyzz8rPcYp3wkzrgEf8yTsKsyW4/SbU0+LxRZ7XGF
+ 30M/2D4vKd/tWC/HSVcSREfx5fxIF+V9To1XHlP2Nx2fsOqZeV0/q3OeFNK69/nQg/qFw7L300dF
+ Zr9xFYf9VZ//LaEj1AR7gZugMUsy2JqZE7/7CTtwHadcTDMGcRiTd+K5f+TvPId6ikhTXlyP655x
+ SFhw9uX89mO7F9y37TrSf/pU4CE+fFVB5Snrtk/WA/XzuOsysKjWeejCy5T+GRGdjOfrQdc4KIvc
+ pQ/XNTo/iXN+8HV+rCV/5lFP40ecCyxyTpfgj3lG5JVYWp95r6KHFE17bBS/6z7K8ps/+qYZ2YtO
+ OreOjINe0lt0UX7uJ48ZfW+4XvX4nsSuWfeFfB4rT3DcR/3CYTnOre8dIdlvHDdvvHf8Yw1305VL
+ 7Sc1muMlZ5O8NHLXjHVshy1c8NwPn9bDnrwT33mADX/mxlU94mR9Y4gXkc1T9acO52VZ6QfBTiAW
+ 0cG2P/3bFlB88de+8FWECEZBt3mpflGDP9nYqMfq3/qtfuEXjOeZvjkz7qArt6KH3FMIbsx48pWe
+ tc80pN3NpZ/qDj4zsFUIVx6X4LtPoJnnqg31aaFVYMnp80Kx1R5X+D30gz2/6aQv6+Y46SrCEQfb
+ +6SL8sRnP3nM6HvDNffDu+fTPv3iE+zA5zjVL1wRaUofjkvDnVdpna/7wbMKf6zh/AMKqaR+UoOM
+ NpvBrFYyMxl+y5+5cMFPPq35chN+uysPjPBnbt7kGXxccriefb0SYZfXOPMiqBdVHmz3m/4Xr+pv
+ f+3LVvoiqPUVmPp88qboMtd5cmv1C4N6CF/9yc+9g67coq7sX+4pBDdm/LpH2Wcaxu3m3TlF/5od
+ yNc1LsG7r5VnBV6Nlc+ndFsTdJGikteVWql5nv5mYly6kX/y6HyWfq2H6KK8z0mW9CQ+WbGgY9Qz
+ 88569D6rQlYeE5kPvORxIiGNW/mSeOEIc/1dH55V/BmWH1h4GAjCGVn8xPbM1P0kxjr2RXjuzyam
+ PXk7j+BsiiUu/s5zqGfyac041b+vVyLs+hp9rESLDgndZ/pfvOqn/bUvuyqoPKnnds/WA3VS0dxC
+ FOX+0lds9gsnBRd++rl30FUw8gpt/kP/nZ/7OT+yw2aapOu54hUXfObk45xxCf6YJ2FXZbY+dYHW
+ 5PNCkdUeV/hd91EW9Vs2tjSsm+OkLwmGfq2H6HICxNd5KGHdAzD2Nx3lD+/K6/qDI5/Hns9xqp88
+ LkxA41a+NNx5WWf12f3gWbX+kRDNCcKZTYzZPYJcFCySWlyMLyJN/XIJvp/MCAh/5mM9p/hdj+ue
+ cTw0Dvfj2X5s9oL7tl1H+k+fABKgqXRom+wYlcfG7X+1HiiLt0SX0TW5v/Tl2Z+c8FMP4aefewdd
+ BSs95C59uK7R+aVT6Qlf58ea6Up27TNUcYf7x/0b9C29L8If84jjCr2obl8oCVHXyee1a9cKGU+9
+ eP+WnmnJOlgm6XqR7qKL8ta78eI1Y3/T0QmFd+Wd9dz8fpEPceRxIiVgfN877uQ8G8dN51v18R8J
+ z5/+GZakqTNcT35sUOP65NGhlO0zj5/RGOOh4I3b++q6eeSskxV7uL/05VlvguqfEatv+hmHy8WH
+ CFbhlYN6yM2FVrQ0gtPMy1l+zuIBajcPP+s9xh35Y+9wiQP3MY+rujqvqpsKsO81+bxUf2qlrkM/
+ IHxeJ/SePOIdcdFddFF+6CT9iXde3xuuvSG6WUcBL79f5AAveZQ2fOmDfowdH21uun6HKR4PrO3p
+ n2FJmjrD9eTHBjXSm12Ltqml9mub8fOhIPs2v7hulMVbkluoMnl50pdnXqa0yYjVN/1shP59nBzs
+ X+4pBDfM2zPydz3hgVN1ZHYi44LPbCK+rnEJvutFBPNctWE9Src1+bxQbLXHFX4P/WD7vKR8tyW+
+ yUOCoV/rIToq4vi1L0fn7W9EjWMVo47oTz/yZOz50gfihGPamZd8FZn9xnHf+VSt/XxgPf0zLElT
+ Z+hPZj/ZfRbt0Bn3JzcPaZ7VOLQ6gts6+RPcl2zdCt4tXh7P7tuXSeXr8qQv9k0cX63HjJOD/cvN
+ hfE0OTo/93HZpi0eYHZzxQsXfGYT8nWNS/DdJyKY56oN61G6rUkPBda65KSuQz++iYedvqyb46Tr
+ RbqLTgiFtk5KyDxm7G9EPmHtz7yuH/no98PkBF/6AK9wgFQC5135er9xpHPf3Y9+6H5W/0hYSf2k
+ 9pONJPOJaelYJGskTc2FC74cmvrlEnznQUD4Mzdv8pB0iGTTh9A89FOcXV7Xa95OxHDDBLe46q9s
+ VSS+GU/+ph8L0d32lz4fHLp0qIp2+qjv9AuDwgjvPmVRVjiOcRKM/cs9heDGjHf+rgc6Mg3jdvPu
+ nPb3j3w5R62HLd7ci8yC7/N03BVZWI/SbU3QRYqOdq1U6wf//KaTdqyDZZKuuq+l49RDdFHe59R4
+ nbMZ+xvRqGfmnfXc/H6RD3WQx4mUgPHmc740vHAVl/ui+zF/6M43N4af1CCrN7ufhN63dHXZboIv
+ Ik39cgm+8yDAmrLJ0/WIs/jCr3oR0TxVf8SUn5ph37ydaNG1P/1XfkaIb8ZDr9reEaSg2zi/9BPu
+ 3H7nr6i/5oy3RIftglb/67x5SSQnhRE+fbttNnqMaz0YQn4R0PCQ3lhq5qUrf/PAx3SsQvPwk+8Y
+ d+SPvcMlrvLu+LF3VYberBGcc8nnenMuqdYKtX5QzOdlXKOOPLJP6C66KOPzkVX1aAJpfyPSCRHH
+ Mvd8zG0cGTz6fIvIE+LI40QCGke+DvR+42g636pv/tCdl44QzmCZs3tk0soZXOYDHtsk0tQvN+E3
+ /Eb+5D3WI84Dv+pFhboMAKT+1GHb++YFqBdVLmzHp//ikYjUJf7al61quFGEZd/G6Zc/69r2jz/n
+ E7dvfcUnbr/mOfynfjbqsdMHW6tfGIKtPhlhmQ+6xsH+uSb/yfMwf5+f+A7nzPDax6R6Gp97ZQdf
+ 16h8PtecV81AdZ9Yi39F3vaV3qy5L5xLPtcpRYectEdffBMPO81YBx9D6zn0az1EtxRZ+8ljxv6m
+ 4xNWPTOv8gFqHPk89nyuR/WTx4UJaBz76kDvN46m++5+8C8I1w/d61LrSQ6WObtHBIuCRYLqJnim
+ 4qXbjUvwOkSG4Xf4M7OrWY94D/yux7gZlzoc77rt70SLDg7XkXy2VRHztb/2ZSucG7W4OtN/8Sue
+ tb3xt37S9uf+0+duH3eH63N/6cuzPzlRN4XhCVSfshR20DUO4rjWm878NDl8HjXn/GqfaYjezZfc
+ jxv0vQTffVYeTFdqWB8JXXpHD+u4rpOVaj2h3Pymk6bkX3TW/5TuoovyPh9ZSshzNmN/I/IJa3/m
+ nfWsp86Rjzb5wEseJ1ICxve9404lXjhuHu6dHljn9UN3XjpC6vLN2T0yaeUMLjOTzzjszyZoxt7h
+ Eif3jfzJy2Z2cYOPSw75USHnGbfPa5z9COpFlQfb8cm3eMXT/tqXrfRFUOsrND3z2tn2tZ/6vO1N
+ v/WXbV/8yc/e64M6V78wqAdvVfUpS9fioGscxHHNy1b3hyaHz6PmnF/tMw3jdnPFKy74zMAe+WNf
+ hHdfKw8prtKwPqXbmqCLFE17KNlKGU+L97vu52jIOlgm6Xp4P7YeoovyxI/3i3hNqjoIG/XMvLOe
+ PGyE3vG5Hp50841zNp/zpeGF4/7h3t3BP+l+7ezp/5aQ0tQZric/NnC4/iTRom2fefwluAhqfQWn
+ T372Hdv//dJfsr32837p9mnPu6P6cn96E1T/vCSr79IFG9YlepRglIW91ptjtm3dCs9LXPo0D8Pw
+ O2mnn3yyM5P4qO/gO4U/5iHFVRrud96r6CFFR7tWqPXhm7jOY/ZjvSzTup8ndBddlPf5NF68Zu1v
+ Oj5h1TPzznrysGEk9xdfjg11kMcOJTCO9TlfFgvHfdfffOfXnv6T7hGb7zlpyjcJfq1PrHYMP6Rk
+ AM+Abo5e2Lyqr694/p3b9/6WX7Z9/Quft33infhzw90vKtblSV9uzG0dPunYHB11B32bI4Q7709g
+ 4UpPhVFfhOP3bnYi1UM+67vilM/Ufr0E775Wnhl6FdbWhwKWjp6gi3Ws9lCqlWo9Yff9HI1Yr6a7
+ Qb/WQ3RRnvg6DyWk3iZVHYSNembeWU8eNkLv+FwPT7r5KoHzrnxJvHBk8/mrWsfhZ1jXr/8CXUnq
+ JzWa4KOPyccT09K5BfkVtnDBTz6t+VKP0sk78Z0HUGvKJhl2a/yuZ1+vRNjlNZ95OxEWVR4criP9
+ V35Wojrir/3aXgRaXfkX/FPi9hWf8nHbm/6zT8T8HLTGflE2heFjpHSQxX1s9PmUnq2H3FMIbpQ+
+ meseZZ9pkq7n3TlF/5odyNc1LsF3vYhQWyvySqxyX1tHyw5dJLjPQ5VaIeOpm9/8sdOM7DoG6Ut9
+ TukuupyAz6nxOmczqg46Rj18mCRvz/T7YaJA7i8+8nMbccKRThviMZ/C1n7jKm7y4Vl1bXvO834S
+ hI8mqZ6cBLGImt0jkzI1tjlXkRMXPFOFT+thX4Tn/pG/8xzqEecQyfSurHmq/tThvK7bvIjqRZUL
+ 2/Hp37aA4ou/9oVXNdyoxUfPxG9YX//C527f/XmfuH3WJzzDevCESwd24rash+WqPqMHQTifY//S
+ W/HforHqAABAAElEQVTkKz3LFg/DaGcu/XxO0X/FHfljX4T3OS5+pLlSw/qUbmuCHtZ3XScrZDz7
+ 8Zs/dpqSPXl0Pks/+qW36LRS6NpPHjOqDsJGPXpYznOSF3E8/xp7PkSTFpmbb8SbrwO1WLiK67qv
+ P7o9/My3Xdv+0Avuhetnk9RPajRXl3A+Md1SXYIqcuJYnWzlWk2okkvwnQdg5QE+c/PehN95nX/G
+ 7fuituHtRKs8BNqf/m2rItbf/tqXrXBu1OKjb3rh856xvfZzn7/9nRd//ParnnVH98lO3NZB1zjY
+ P9e8hIf+fR7cpm7rXrT+DMNvxmuu+B1+xB35Y1+EP+ZBmis1VLcvlASQTKhQ30Q493WyQsbbr4fW
+ Aqgv6+C41nPo13qILsoTn/dD8lim/kbkE1Y9M++sR+fvsANf+sD5k8eFdb3m60DvN47mundg+tnt
+ q190P/5Yg/bfnqR+UqOJuoTziemWEMoQ+jOz6YEvh6Z+uQTfeRAQ/sxU6zJ+12PcjENglYMZjs4j
+ 8eBqvxPbn3zpU4GH+PCJvnnK+qic/qv/5FnbD/yW529f++s/bnuGblfaOujK7nTemLjmm6d0pMnh
+ 86g551f7Oh+sd3OfA3WN/jWbkK9rXILvc0YE81y1YX1KtzVBTyk65KS9dNCbfNjpS3yTR+cz4mBT
+ B8nG8+o82U8eM6oOwhrHKvZ89vq8HOXzdp7ZB+LI0wUER76KrMXCcd/5XMb527njB9bZ2Y/q0hFS
+ l2/O5OwnMdaxsTyJ5374tB725GW1sgVnUwjD7/BnbtzAH/nNs57IyROc7dHHSsTqDENC4nQppINt
+ VTTsVVfFNYGoPqpfnoM/r/VnP/U52/f95udvX/hJzyz5DrqmX+rFNS8b9RnD51H6jXOzvo6TjgzH
+ 75P4EXfkj6244DIX3+QfpV2Jpfst3dYEPa3jktMXtfWBv+/n6MQ6+Bhaz1N6iC7KWPfGgzl5+xvR
+ qGfmnfWsp86Rz/XwhJuvEjDefNVE9pmvy1v3DoX9KJH1Dev8R5PUT2qQoVlWz9k91oyg2CSYuOC5
+ Hz6th30RvvMAG/7MzVv1iJP1jSFeRDZP1Z86nNf1mhfBvahyYTs+/dsWUHzx177wVcShnlHaR+Xy
+ 1z/nju01L3ve9m0v//jt1+CPROx0ZUfRg2tetkP/0luwdY8IbR6G0c5c8YrLOWcG5sgf+yL8MQ8p
+ rtJQ3VQgOlImFMg3MceS0woZb//8piOw8CYoOsTTrntcfvGLLsozz3p/s4LkVR2EjXpm3lmPzp9Q
+ jD0fbe6ClzxdQHArXxIvXMVVffCPB9bZtae/YdUZric/NnTm7WjbZx4/hcXgm/ZjcHz+L71ze/3n
+ /ZLtz/+G5+hPy+eT1Q8p30FdtkP/wWnmpSs/Z+kHrXbz8JPvGHeDvpfgj3mu2tFYD12wulfRw/eo
+ 2kPZvn/G0/KbP3b6kr3obtCPfuktuijPa5v95DFjfyNSfuJY3/4ciTSOfB57PscxsvmqMePI14Fa
+ LBxN5xM7n1EY/oZ1x/Wnv2HVGfoTAofIjwYeEmcv2tbRtp8yYgjn5cfa6zNxS/7Er3n29sbf9Anb
+ F//KO91e+qel26xr1a1bt9IP/mlLP4bhd6m785PP+q+4G/QtvXe4xJEX/snfhV2RhfVAhdHR1w16
+ WMdqrxUynnr5zR877chedDfoR7/0kOBRZuikhNTbjP1NZ9TDh0ny9kw/z7/GyjP7QJxwAFUC41a+
+ 3m8cCZ1P7HxGYfiB9dyX/Qx8jwrC5GxuzO6RScclqyInruNMxNc1LsH3kxkRu3yjjpvxqw5ENk/F
+ RUzXSW2rj5xZ1+XE9qd/41WR+GY8daJdLfZitfyxtvpV+I+q//ZnPHf715/9vO1F+NPy6p9N8hIe
+ +pfecFn30rPs3T1ieHCZD/cP2zfwJ9+OP3HFM/OI4wq9WJ/SbU14G/pCLTl9UVtPvonxK3Zasg6W
+ qfU8pYfooozPp/HiNWN/0xn1zLzJbxwZKg6FL74cG+oljx0CMt58Hej9xtF0n2jj/o3PKAw/sF55
+ hj+Htb1NEHh5GfQErdk9Mmnl5Exc5gO+HJr65RJ8P5kRsMt3op5T/K7HdSdel3qX13Xb34kWHRyu
+ I/2nTzlUWNepuuhX+FiU/TE8vQJ/Zut1r/j47a992sdtz78T2uD8lxBu3OdR+uV+wNX6YY0o3+Ha
+ xyT/8f5x/8gfW3nCn7l4Jr84rtCL9Snd1qQ3Mcvse1UKtZ6w+eaPnZasg+PUt+7nwtHvfUTwvMRL
+ fPb1NOm8/Y2ocTyvPR9zG0c+jz2f62Fk81VjxpGvA7VYOJqV7+z8JzY+ozD8wOJq80/h/aRGE2yK
+ zfAS0JsZ69iKGrjguW9RtPILcRiTd+JP8XeeqmPij/ziRWXNU3UF57yjj5xZ14XikNDx6b/qtWP4
+ 0wdntTUWZX+MT/zT8l/6yc/c3vi5H7+9CvM1bozh84hOpSf8fT5Y63wz9zmA5xbOO8L7XHNeF+cZ
+ pV2JpfXRhat7FT2sY98rvGnpaT3rzR87zVgHX8Mb3jcA0e990kX5uZ88ZuxvRMof3lFHzot+8VVc
+ 55l9IE44FSKg6yGf47JYOO5XvvOztxdqPrD8U3g++Rg8Z7XCSyQKuDlXkRPXcWQfTdCMfRGe+0f+
+ znOoZ/JpLXofQvMwP9XY1em6zYugXhRMcNaR/o0XUHwznvwVxyIqD5dPpfH8Z5xtf/UFz96+87Of
+ u/3GT8A/JtbQOWO9O++yJTvWu3l3TtG/ZnIe9b0E3/eg8pDiKg3rUxdoTXiL8h7Pdte91j7fxLmf
+ QvrFOjtOulKfvG/EV+8v0UV5n0/jxVt8rIOOUc/M6/rpdR5HHflo04NzFI502tC9MF9FZr9xFcf9
+ a342cWd9w6qfwvtJjebqzb6ezEw6Lhn9tAeOxci2Q/5+uQTfeRCgPMBnbt6b8Duv8884HhqH6/Rs
+ fycqv23Xkf4rTiKCB4Fdp/qmrfCxKPspNn36c+/Y/uXLn7v9nRd+3PYrn4mruNN93YvWz3LrPdHn
+ hT3F5ZwzU8sWmgbGKf6BP+Zx0NV5tT66UHWv6t77KZH22Ch+D/3qze/41Y91syyt5yk9RCeEglsn
+ 6ck85uTDRG/AUQ8fOsnbs3DkqzgQdH5smQ9x4asEzrvyJfHCka/ynfvfEHJnPbCuP/p2bvhJjaRo
+ liSc3WPNwMS+CM99PmR2o+zJG37Db+TvPFXHxB/5xYvKZr0SYZfX/ZkXWXtR5cJ2fPq3LSB52l/7
+ sqvLY7+75p86xu/5FXfiHxOft33tpzxrw8/opec8tz6fyJ95d07Rv2bKd9T3EvwxDymu0mB9+3tV
+ 19FPidGuccZDBr6J657PfuSv+4jpYt1FJ4TCW6eqJzL3N6JRz8w769H7rIrZ8+XYcI7kcWEjL8+3
+ A73fOJp1/tdPfcN6xjP1rw315ATLnN0jk0ZUzONJyawTr8zl15ovl+AZf+RP3lvhdz2uY8bt87pu
+ +1FTL6o82K4j/dgWkPW3v/ZlV4fw11/qWRtP3enj8POsP/PrnrV962fyb4KgbqUnJLnhnLHnY+Bt
+ jq7Rf8XlHAUyUMsd/yV5OvY2L/Smpi6+UHPSw4jlya06jVOf3OebGL9iCyI8dXZc63lKD9EJYXYk
+ arx4zdjfdJCPQ8c48ia/cWTw4P7iSx+olzx2CGgc++hA7zeOZvX5DP+RBu6sb1hf9iL8NTPnb9aT
+ k0nRLNnmE1O9Ikgz/RgTF3w5NPXLJfjOg4DwZ27e1EXS4gu/6kBk81T9wblO12veTiQK0cHh+PRf
+ /UlE6hF/7cuuCkDwqCVJSU/Z+QEI8b/81Ae3L37Lg9Kzzw+K9PlgrXPIXOfpc4r+NVPJ8nOpcQn+
+ VJ6E3u5Zb1bVXxdoTXiL5n2VKmkvHfSwGnajyDd5ZI842NJbdFoptHWqejTB09+IRj16WBZA59Q4
+ 8nns+XJsqIM8XUDuAevrQC0Wjib919+8fdnL/ZeMYmc9sATf7tKTEyxzdo9MWjk516Nx4pg9+1iY
+ Ma+X4Bl35E/e5r0Jv/M6/4xLHa7TdduPwnrBfduuI/2nTwAJ0FR1tl0NiiDNPjVnyvkP3/Xw9jmv
+ v3/76+/44PbB66XfOLcbzhkxPgbfF59T9H/87tNVOxH16QtV96p00Lu67qOK9sU0Hvt8E+NX7PRl
+ 3fqa2n9Kd9FJcbPj3i79yWvG/kY06pl5k984MlTcji99gJc8TiQg483Xgd5vHE0EnJ3dJUe93PDA
+ 0pOTZGiW1c8npqXDNqnoz3zAl0NTv1yC7zwICH/m1NEzSYsv/K5nXy/xwckvk+LVdi+W7TrSP/el
+ sgG7eOpTcSfqSV1PlfmH7n90+50/8MD2VT/60PauD+Eqnjpv6V76Yy35M5/C515RxMN5x/a55rxq
+ FnyfhxRXadx4r6JH3lep1vev9cSbWA+tgx7Woa+p9R/60d/Xne8LPT2Iz37yOC8fJgpoHM29vkQa
+ Rz6PPZ/rYWTzVd3Gka8DtVg4muC9frMH1p3P/k6ArpPFT2zPaoXNm8Kzmgaa8wGvzOXXmi+X4P3E
+ VYnNn7y3wq86ENk8Vdc+r+s1L2rqRZUH2/Hp37aA4ou/9oWvDo/91vbH+vQePJz+5I89tP22Nz6w
+ /cB9+rN9atnnEZ1KT3j6fLCW/JkvuR85R5Hz5RL8MU/HXZGF9akLtCY9FEZ7bBS/h3715o++aUf2
+ 5NF9HXGwpbfotFJo6yQ9iTdjfyNSfsvNh0ny9kx/P3XG+RaRJ8QJB+7e90Ms+Xq/cewaxHc8epcr
+ 8uv+G9YffuF78Uh7C4P5BMzsHpl0XLIqcuKCF/VoYtoX4f3E3fMnb/OmLhIe+MWLCpun6g/OeRlW
+ feTMikeTtLe4Ew8hnK/95sn27E/rp8DLw9Dvb/7MB7fP+r77tr/3Cw9L19m29MOGdaz7VLbOFevd
+ 3Oew7l2fO4nLz6XGJfg+Z4CZ56oN66MLNa8X36QqdbXri9p68qGBX7HTl+xFd7HuoovyPh9ZSkhe
+ M/Y3nVHPzJv8xpGh4kCw+HJs4CWPHQIy3nwd6P3GwTw/+8H58ysC9g8shVy7i1X7ie3ZPTLpuGTj
+ SXnEi6YfnbIQ6KYmb8eRlw+jA3/yNi51kfLA7yf+vl7GBee8I4/EWzyiE9ziTjyUM0/7zZNtlpM8
+ Wn+Mv7zufQ9vn/f6+7b/6W0f2PgD9qlzWvd5RKe6T5LpcM7YA4POn7HWfX//uH+Dvo/xPonjCr1Y
+ n3mvSge9q2e7vqitJ/x888dOS9bNca3neL/Q731E8H3RebKfPGbsb0SNY30rb/IbR76K6zx5v3Mf
+ ceTpAlhn+uhALRYO5tn5XeXt6cQD6/pdvBx+YntWK2weYWm1n7D1UJh4sUuUzoNAN7XDJQ95T/An
+ 77GeU/yuZ18v4/Z5R57VyKIT3OK6TuN1uOpzxoNg0Xee0fHH3PInH3p0+wM/eP/2e978wPaTD+If
+ /9g/uxw6p2mfR+l3s3NmOH6fxI+4nGP4Y/uc9veVGO6LF2vOV224Xwq47pHrlaJpD2XTrv5kjfs5
+ mrIOTaf++33DuOghuigz95PHpP2NyCdcZY46WDd56ef511h5yq8JccIxIPvpowO1WDiYh59fEXDj
+ A4s/x7p+ft1PbJDy0gDYM9axSTBxLEa2HXxdo5q6CH+Kv/OENzNZh0g2fQjNQz/F2eUdfeTM2g8W
+ wS2u66z+7Bj+2l/0nYe1fKwNfov68z/x0Pabvvfe7dvf+4jbk75om9bQOb1LPxjWcd2LPh+G0Z+5
+ z8HndozLOQLucQn+mCdhV2VWf7t7FT2k6GjXChlPvcb9HM1YL+pdPFqc0F10Ud7nI0t6El/y8mTo
+ 0AmFd89nL3ECKrB1LyJPiAtf76cPhTmBsjnvOd2Hn18ReeMDiz/Hura9xU9sRPEhAWDPIrVNgolj
+ t7Lt4Osa1dRF+FP8ydu8N+F33n29Pj2L6byjD50Syuu6sJb2FnfiyzH86bvDx2K1/NG+okSv+fkP
+ bi//7nu3v/72h7ZHsGGd4aBuvluYJdyu3eCs47oXtHWuQO/mPgfyFj4zmcvfSS7BH/N03BVZWJ/S
+ bU14l1L12S7toR/8emgd9LDOfSw+p6Ff6yG6KE/8en87j9K7DsJGPTOv66fX5+WoIx9telC/cKTT
+ RuVlXxWZ/eCubzf8/IrIGx9Y3D0/v8tPbDTDprHVM91lY9I+s04897GhqV/K3uESJ3jlwTr8mW+F
+ X7yI7DqZjyLs8o4+cmbtd2LHp5/qjxWJb8aT39vqsXi0/hh4+Q/3PrJ9wffds33VWx/Y3v2hR/e6
+ sr/owfXQmSaHz6Pmm50zsH3OiQs+swn5ukafm89Z+Qa+7wEiyH/VhvWZ96p00GN83CvZdR/RxFPl
+ Gxb+g+e7Tp3Z6QfWdsddfmLzCQixENmzRLNNwonjJZZtB1/XAA/HRfhT/MnbvDfhd959vX5Tzbyj
+ D247QdVl23VUH3ovFJD1l73qIp/Cx6Lsj9LpFz90fftqPKT4sHoz/piC+zvoyt6iB9d6eEUIbljn
+ nse53XDOALWeiQs+s4n4ukYJr3MPLjNQxzwr8GqsVLcvVN2r0qEer32v/DhXP6yc31TmN510Yx36
+ WIw/pYeusxRXaOukhDxnM/Y3olHPzOv6XY/OvwrZ87kedtZ849zM14Guh/lY3qP7P39VqAu+Yd15
+ 5+7PY7lHJo2omCEGx+6TDcVkfzZRwJviGXfkT16quMvjxOLLi/MaN+NSh+Ndr/2I7AX3bbuO5Euf
+ ABKgqepsuyoQQar56Jv5xxS+8e0Pbi/793dv3/JzHyhBMh10ZXvpn2tewkP/Pg9uU7fSU2GHc2Z4
+ 7WPa40fckT/2jn/gfY7gK35yX6VhfUq3NelhxDqXnO7AePZD/Zae6ck6OK71PKWH6IRQaOukhOQ1
+ I/PoYDhj6BhH3llPP+WEG++PiiNR81UC5135krgeYid/fsU6Tn/DOvx5LPfIpHW5OEMMDs0oYjfb
+ IX+/XILvJzMCdvkYdwv8rsd1JN4qzzpdr/2dSCWqPDhcR/pJn3KosK5TddFfHfai7I+i6XXv/dD2
+ G7/rfdvX/fiD24OP8r+nSd/p76Are0v/XOs2RwhuzPhxfrUv/bHezaWfzvEWzjvCX4Tvc6o8mK7U
+ UN0WuvSOHrmvKZd23UesnhLfsLbTP7+iIqcfWPRs689jSTJeIuz2JeMlpV2XdTfbIX+/XIL3E3fP
+ n7y8nJfxy1+fADNOb6au0/Xaj81ecN+260i+6o9A9TnjEVDb6rH60/qj5OUn8EcTft+b7t1+9xvv
+ 2X7qITyoqIfeROnbbdPR55M+owdD9PDyfaDJ4fOoOedX+5Id691cvIoLPrMJ+brGJfiuFxFqa0Ve
+ iZX1QWXR0dcJ6lvHag+10q77KIvvw2WnGevWdNZ/6Nd6iC7K+3xkKSF5zdjfiEY9M6/rZ3UIQJ6M
+ lcdE5gOvcEBVAuNWvt4nbjv98yvmuPiBdX7920jSn1RsnlT4rbmKvOgTbjbBRLEvwneewd95qo7U
+ M/m0Fr0rax7WR3F2ddKsPlYji05wi+s6jVfH4pvx5G/6sUhFV3e+H/+67+t+7IHtc//9+7Z/9Ysf
+ RBs8Z9RLwXnCpYMs7ssf3bThfomTewrBDbqNs46+R9lnGnp38yl8zt2BfF3jEnyfMyKY56oN61O6
+ rQm6RLdUbKVaT/h9XsY1inpMHtkndBcdFXF86yQ96x7Ia76FI/2ej7lVL99nNfZ88CsN4phPaWfe
+ la+Ahbv2beE7zhc/sL7iZa/bzq+/bT4xmSqt9hO2HgqyUV329bCY2aqpHW7guX/kl32L/M7r/DMu
+ dTgv6k+e1YiqVHkItL/6KFuHpjri96wzyFmJYDZ89dbU5e//3EPbS7/jvdv/gZ9X8edWq184CeAJ
+ d9/0c++gq2CFk1sBXPWQ3rA03+ycgWHak/gRV4U0f+wd/8BzX7zFvwKvxsr9lm5rgvq+UOs60V7v
+ Kz00hp1urIPPq/U8pYfooox1b7x4zag66Bj1PBnfsJDwp7YvffHrXMWNrxc/sM7OzrdrZ/94PjEt
+ nVvoJz5E4eWRnZl5uD9H2TvcwHcexFhTHBLWF+GP/MIhonmqruDMYz7zdiJVqfLgcHz6qfysRHzx
+ 135tLwKtruTLm+55ePv873rv9kd/8N7tPfg3gdYr/bBflE1h+DYvHWTpGA+6xkEc13x3iYCGx+Tv
+ +wGX9XUc0yXtSfy4H0f+2IoLLvOJPK7q6ry639JtTdBDiqa9Vqj1gX9+00lH1sHHIF11X+sekwW2
+ 92HwvDpP9n0SOcb+RtQ4Xos9H3MbRz6PlWf2gTjydAGph3wdqAVw/w/qy24513TxA4uYR85f059U
+ vAzYSqv+hGDv2KEYc2asROGiRtk7XOIA4f6RX/bkHfgjv3jB0DwVF5zzjjyrERWo8pDQ8enHtg5X
+ fPHXvvD7/sq6MtO78JdS/dEfvGf7bd/9vo1/tmqnD6pc/cKg4Dzh0kEWdTrqGgdxck8huFH6ZB7n
+ 1vnhY7p1DGJSPU/EfUKqKzWoQwk9J+gRHVKuccZTL7/5Yzeq72fpKrvuMUCtu+ii/NxPHjOqDsJG
+ PXzoJG/P9ON8M1Ye8pGfr4gTrjeqHvLRj7EWr/HG6debP7C+8uX/gX/jHzn7yUnusknpJzuflCga
+ STXbwdc1qqkdbuC5L15EhD9z8w48Ei1urJzX+WdccM47+mC4geIRHWzXkX4Wr3jaX/uyq4xDPbV7
+ 26YPXT/f/vpP3L+95K534x8DP6CHA4tpnave1S+c1IPI6lOWZD7oGgdxXPOyHfqX3nBpHufW+RlG
+ f+ZRz62cd/Lt+G+SB2mu1LA+pduaoIcUTXuo2QoZT8tv/thpyjpQ79JVi/V+pF96iy7KE5/95DGj
+ 6iBs1KOHJXm5m5l+nn+NPZ/rYUXNlzjlZX0dyDX+dtGX6K9qD99xvvkDi+iza9+kVngZaOJ3bLlZ
+ LJPP2Q6+rlFN7XCJA6qfzFiHP/Ot8IsXkc1T9URM5x15ViOqUeUhoePTj21VJL74a1/4ajEEZd6u
+ 6UH8d39/9x0Pbq/4jvfgB+v3b/jvlXU+qWenDzZXv8QRxfOs/uTn3kFXbkUPuacQ3Jjx437UPtO0
+ /FjLln4Vl3uRGZicI5cal+C7T4DJfy/+RcOrfviB7R0fwL8Nvc1Db+rcl+ho2aELlZntWin2o334
+ 9dAqW5vCm6DodK79vim/dYYBXX0C1nvpz3OGS17zLRx1rPcF/aMe8ylM+4uPOO4jjn11Acm78hF4
+ fnb2TWa5+PXyB9b1a/+AGfvJCYM10ObQzGS0M9shf79cgj/F33nCm/kEv+txHTMOhakE1+d67cd2
+ L7hv23Wkn/QJoPqb8ey34roeEt6ewfK/5Wce3F702ndtf+It+GMKD/g/UvYn26qrdR66+JMTBIKl
+ LzKmv4OucbB/rnEu0Zkmh8+j5nFunR8YpmO85lFP36MRd+SPrTzBZa78O37s/ZN34c+bfc8921/4
+ yfprcbB3O4YepuvCSYC6XtBj6s7qrFDrCXt+00n91oF6Dz1P6SG6KOPzWfrznM3oe5P84a33Basq
+ 4OX3ixyIYx9OxA3F972jff06/lK1Z32LnDd5ufyB9Uc+812I/47jJ1Y/YSEKu5SdmQm5P0fZO9zA
+ n+K3trfG73pcx4xLHc7LsiheldeLZduffoxHgwZoSjzrqjj2eex39v4Er99094e23/qd795e/ea7
+ 8QN1NpW6MfOW1OXKPhHWyzMvk8pXaPrCLBxfD7rGwf7l5sJ4mhyTv+9H7Ss/1ru54hWXe5HZhHxd
+ 4xK8z9H1zTyU53/Hf8j9Wd9z7/YP8B9203c7hvVBdvaxJp8XClpyUte6j9zHL5/XCb0nj3hHHGz2
+ Kl7dB8e3TnZ0Xt8bBPiEq8w9n72sn8wee770gTjydAHcTx8JvPba7ctf8O7wXDRf/sBS5Nk38YnK
+ FpWTcxXpJzuS02YRKT5zMl+CZ9yRXzbjwpuZnAd+53X+GRec63Pd9oOjF0UH23WkH9sCqo74a1/4
+ avBQT+0+odMvfODR7Svf9H48rN6zvenuh6sdnlDqxswTg24ZrXPVu/oFQjDiR7zoDrqSLHpwTf5D
+ /9JbMOefNtOQdjePem7lvJNPvLkXmSvvjj/5Ks8vfvDR7Y/9yIPbF7z+nu3776m/MgeYJ2tYj9Jt
+ TT4vFLHktFKtHxB6aC2ASrYOfSyIv0B30UUZ4ut9Jz6esxXob0Q6qfDW+0L1GXj5/SIf4sijtBWn
+ vDPf9Zv+sN1V3ewPjgah+Zn/GJfoPqZKq/6EgF2XdTczhvtzlL3Dseixf+SXfYv85jHfjEsdzut6
+ 7UdxvahyYRNHcSdeQNURv2edQdqsPmbLT9SaP1D/az9+3/bSf/uu7e//7ENVb9pxQdYDdfLEcgtR
+ kPur+ttmvzCoh/DTz72DroKRV2jzH/rv/NKt9CQ7bMkuVsfLrnjF5V5kTj7OGZfgbzXPD95/ffsd
+ 33/v9pX4D77fyf/Nz5M0rA8FLB09+bxQQ7XHFX4P/WD3/Ry1Wremk848d+fhfukuupzA3E8ek/Y3
+ IuUP756PyMvvF1GII48PmhtVD/m4ff7Adsf5t8pxycutfcN61affh7/U71+4JRaJJPUm2D3JKUrt
+ zzeJargE3096gMOfmV3t8pAweUSeeoybccE53jj7O5EYRAeH60i+xSue9te+7C6gFk/s9C9+Af84
+ g59Tfd2P3LM9gAeX+2O9SzdWoH4585bwVtRwf8OPQF6m9M8IHkDHw+TGMU4BxMk9hXCiFe/802Y1
+ jNvNKqDy3sJ5u+CL8V3vreQB5lvxj4evwM+3/upPPbR9gLo+wcN6lG5rgi7OXXKgCivV+sH2ee1r
+ lH/ykCA6kgW29BadVupw7SePG/e94dp5RFf3QLtV4OX3i2jcH/J0Aamn7u352T/a/puXPkDkZePW
+ HlhkObv2GrdUEtabwE92PimtVp7o802iIi7B9ycAwMpDsZl28lL04jnye9/+GRececxnfyda5cFB
+ HMWdeFWkOuJPXZwVPhZlP87T2+5/ePtd3/WL2xe//r36gfr+E4v1Lt2Y2npg5i2py5X91qdw7hcG
+ HcKPePV30FUwJ7RbwnG3R+dXYaUnvNZXWXy+2DvWw3qt/4pTg80uIlk7XOLkrvuDtfgz14Ht4gr/
+ EP4N61/+yYe2z/7ue7b/Dz+gfyKH8rOyOrg+P72rve38VHjpoIfVsFOj+2m6G/Sj3zqTLooQn/3k
+ SVafLxDacH2jDm5gXH6/iEIcebqA5CUf/ibk7eyW/nGQTLf+wHr0Q/8cCd+VVvuJz+bZ9JxVI5Fj
+ SCRqdRrfT3qEUIq2L8Bb9MUvXkQe44KTn5qBz/ydSCQqr/3pp+qV2Kx7xseuGkSw6nm8Vvc+fH37
+ H95y9/ZZ//YXtrve80EW4Dp4+qOR2lZ/zG09MAtHoMfqH/EYttkvDMGKv/rxdNDVgVUH4yQMd3t0
+ fhVWesLb+bFW+Zk7H/NH/xXnApueRDKU5wT+MeUBU+olKf/R8FVvuW/7z99wz/aW+56Yn285X+m2
+ Jp+X6mElHD6Y1KcPFygX25iqf/JcpLvoorzjZElP8prR9yb5ieN5rbzJf/n9IgfidA+xrASMJ9/1
+ 6+fv3j700L8l6lbGrT+wXv2Kh0H4mrTqTwjeLau0m5mZ+3OUvcOx6LHvo0FPCmeTt85vHvPNuNTh
+ vOazH+S9qHJhE6dLwbrKrkXbDou/mqw+yvqIJ/5Tyd99+/3bi//Nz2/f+BP3bf1//EtdOOxVP/uO
+ iQVG6yocK/Zwf8OPQPcLv2DpKzyMO+jKra6DbgTSHqPzC3eTc2Y4fp/Ej/tx5I+tuOAyF594w5+5
+ 6tzFjfxYegD3Rvww/vNff+/2x/HzLf7Fho/ncL+l25r0JmaeJSd1HfoB0fdzFOR++lis5yk9RBdl
+ rLssJfQ9Uv7cL84YdM+8rh/7wpHBg/uLz3GqP3zVmHHId+3a39/8bAnFTedbf2CR5vr538ST8RH3
+ 7CL1pGWREIddaSaW9hxl73ADz33xIib8mZt34I/8zuv8My4452VZlUeqIlnX5cT2px/jVRFxIF7x
+ savJ4inrI5re8L4Pbr/pte/cvvpN79veg3+b5X5GHtbBa9EO1hsTCwz1y1k4Aj129WNr9QtDsPQV
+ HsYddOVW9JCbBRlPk6PzC1d61r7Kxno3V7zics6ZTcjXNS7Bd5+IuDQPMKm3E1Td1zF/yzs/sL3s
+ u+7evgF/HOLxem45X+m2Jp+X6kkl1HXoB8T8ptMo1dvH4n6Gfq2H6KKI+5YlPX2PyNn3i/eHNqaZ
+ N3pdfr8YDV7yOBE3ovcj16+d/a/auMWXx/bA+sqX/Udk/Rb3zOywIAq72c12yN8vxGHscImrffFi
+ Hf7Mt8IvXkRynnHYYNrK69n+TlR+245PPxUnsdnnjI+tcDpq8eFPP/fQI9uXvuE92+ff9QvbD+G/
+ ++NY/RQv87AOnv5opLbdZ8UpXjgCPRaf6139wi9Y8bdujDvoyq2ug24WZD66OMjb883OGSC3cQI/
+ 4o78sZUnuMyVX7zhz3yTugBZQ/2tvvnzrf/5bQ9un/s9d2//4hc/8p9vWZ/SbU0+V1Sx5KQudR+5
+ j19880ffFGwdHNd6ntJDdFHG59R48Zqx7xfvD4bkGHmT3zgyeHB/8TlO9ZPHDgEr/lu2L/mMn0/s
+ rcyP7YFFxuvnfwmveMC6SM0sknZm4srPpcYleMb7aECDgLYn7034lR+Rx7jUIT81Sx6Jp0SrvPan
+ H+NVkeqY8eyXtsLHouzHMH2AP+z9kbu3l/zrd27/7zvur3pMvKuXntTB07dQ2GS9MVec4MIR6LH4
+ Fk5vApqCmUh6Ycv9HXQlVdfBuCkEnXQv/nkvOj8wKj/zKfxNzrsKc57gMlf+Hf/N8hQe0xrqb/Rd
+ np968JHtv37zvdvvesPd23+s/5pgBd36yvqUbmvCaUW3cPlgWk/4fV7GNSrngW31PeonpnUXXZSZ
+ +8ljxv5GNOqZeWc9Ov8qZOVxfT5W6EgeFyYkvrniD7df+4sVdsvTY39g8VvW+fbP+glbl1U2qsv+
+ bELVEIexww089y0ZRCxcZl7OXZyJ+NrDeY2bcanD8c5vP0J7wX3briP5ql4C1eeMR0Btq4jqrwu6
+ xcW3/dwD20v/9c9tf+Gtd28PPoJ/X1I8cx5lrjp4+u1gvTHZSOrGLByBHu5v+BHoT2z4BUtf4WHc
+ QVduVUKheCurbro4Zv19frWvsrHezRWvuFs47+S7CN993kqeUS+WHupv9N3b1uW77n50+zx82/pT
+ P/rA9j78i5HHOqxP6bYmnxfIlpzMV/eR+/jl83IdyWsdHCddR/3EtB6ii/JzP3nM6HujSG2IbtZR
+ BV5+vxiO+vELkwvEdO3s7J9tX/YZP07vYxmP/YFF9vPtr/QTti6rbDSRfV7S3Sh7hxt47lsy9MQU
+ sRkXXGYSF19yOK/zJ16nXjjnnbyINFAUgsF23uqjbAFVR/yedQZp81BP6rpo/rF7P7T9jrt+fvv9
+ 3/OL2zvwqa0x+nM/M18xpQ6eftfPemO6oI4XjkAP91f1Y2v1C0MwE3W86A66kqrrYJyE4m6PFU++
+ 0hPezo+1ys9c+iku+MxkLT+XGpfgH1MeEKbeYq/+Rt/lWLjzDV+Mt/8L/6H5y77z/dvf+mn/fxs7
+ /pKFeUq3NeG0JPhol/bQD349tA56iG/y0D/0o196i04rVbj2k8eF9zeiUc/M6/ohE/3Ik7Hny7Gh
+ fuGAqrofffTsryTmscwf3gPrj7z0e1Hk65jIT3aIwaIpSorPnGrK3uEGnvuWjCKYN3PzDjwShVmz
+ 8zr/jAvOeScvwgyseNuuI/1UfxKb/c342AqnoxY3n96Pn9p+zX947/bZ/+ad279/zweWXgwb/bmf
+ ma94mYd18Pp1/aw3puvoeOEI9HB/5uXO6heGYMVf/Xg66OrAqoNxLMh56eLo/Cqs9Kx9lY31bu58
+ zB/9V9yRP7bynMB3n7eSZ9SLpceoO3XSkb6wEo72vfhm/D/ir5z+HPyPPPg/9LiVYZ7SbU0+V+UJ
+ C/MsHfTQGHajVK+PQfWO+olpPUS3Olr7yWPGvl/dJ6sYdZCfvPTz/Gvs+VyP6heOAYp7Hb5dfW9i
+ Hsv84T2wmOH6uZ6QetKiiN1M/2iCZuwdLnFyQwzO+K2Zl7D22eQuDvvh45JDfkRwnnHBOd44+xHU
+ i6KD7fjkW7ziaX/ty1b6Iqj1iYmfxn/rbfduL/qX79j+NuZHSh/XXQHpE2b2Xc+gZxzzUinM7o/1
+ xsRixgtHoMfiWzi9CWgKZqKVn3EHXbnVddDNgsxHF8eKJ1/pWfsqG+vdXPGKCz6zCfm6xiX47hMR
+ l+YBJvV2AvU3+i7Hwrlf28Dh10/hf+rxu7//7u2VP3DP9rYH+g+iNOVcJK51tOziIW7JyTxDPyB8
+ Xs4fTvHVMajfUb/58r4gXRRx341XHjP2/UI+x7OKUUf0p198FYf9xZc+rE858L8cvPbnjH7srx/+
+ A+tVL/l2lPr9+qRgkSw6M+sYTaissne4ge8nM8CUqO3JO/BHfvHyMIFPvC8D5TOftY0fmwaW37bj
+ 00/FOVD4Pf9o89ivWP3yHe9+aHv5t//s9jVves/2fvxF6kqr0wt/gUd/7sf+UaYTYsOfbIhTXtab
+ dka/dDMPeDN29Svcl6loFBGdGOO2DrrGoTpgkN9AejRm/fNedH6gWBWr1VzxiosOmcl44I99Ef4x
+ 5RG9dWMqDear/KmT+8pnQMHcgd7M5f/2d39o+1x82/oz+PvI+PdwnRrmgU95evJ5iSdRxR99gOAN
+ WnUYJ3vR2T/0o199iE4rBa795Ck+3RuuuZ/6Vt7kv/x+MRpxi+/7ti954V3c/XDGh//A4t+7fL59
+ vZ/sEAPiUHx/cqAU2nOUvcMNPPctmSVqe/IO/JHfeZ3fZ+J6gnNellV5cmZdF4pFoP3px3Y5hr/2
+ ha8mi2e2/NP4t0i//7vftf1O/KzqP97Hf1QY9en6hL+iRn/uJ/VwHnmYl/FutHhjGtjxwhHosfpf
+ OL3ZaApm3o4XbNSdQjirDsZxIWBlcd00xHPoi2mI3s0Vv8OPuCN/7Ivw3N/xJ9+pPKkTcw/iKn94
+ 6FM+gdyvbeiDX8IVP79B/w38jz74P/z4pp95CP9A0syOFg4RytOTeAgoGq7wu+6jLOZZNrY0rMPg
+ Ee/CtR6iWx2t/eQpPuYlTPnDu+ezlzgBFbjnc5zqL75Hz7YP62dXIsfLh//AIsOnvuQf4RX/1hAi
+ sujM9I0maMbe4Qa+n/SAWlOIo7DBO/DhIzWHn/iuY8YF57zG2Y+gXlR5sF1H+lm84ml/7ctW+iLw
+ mn/r51/4ofdvL8E//vHfAnrwUEd9vg1Vd0FGf+4n9Qx66Qyb8V0/eWMyj+M0C0egh/sbfgT6Ext+
+ wUy08jNu1J1z7TronkIQv+ef96LzA6PyMxev8kaHzCbk6xqX4B9THrCm306g/kbf5Vi4qTNw+KV+
+ qi4wKoL/BvFP/vB92+fhfwDyve9fP98yT+m2JvEwsGnE4zq0rzzLVhLhc26l66hfcbBdHyyeV9XX
+ OikheYmm13wLR96VNzoYRz6PPV/6QBx+nV/ffnz7Qy/8p8F+OPNH9sD6gjP8662zv8wu/YSvmZVI
+ lFFS2Ttc4gRnUyX2tBkXXGbSHvjFS1GwL56KC855GRY/OAwkm2E8I/nTj20BxRd/7QuvcG6QZfuH
+ +HNUn/HP37H9xbe+b+PfVqK8gviSdH5dn+kHaPSXuMbnTqQOxnf9rDemgR0vHIEei2/hdJloCmai
+ jhfsoCupug7GTSHoXH2J59CXygZmN0u/igs+swn5usYl+O4TEZfmASb9dgL1N/oux8It/Zjh+A0L
+ jIpQmSjgR+5/ZPvC77t7+5I33bP9NP6tsHngUJ6exMPAao8r/HYd2oft8zI/9zjEt+jKHnEglA6i
+ 06rjvJ882nYddCi/65l5owP71vk7THkXn+Nan7OzvwTsvvCKu9VJJd0q+CTu350/Y/vJN//02dm1
+ T5bKvLynBvcpWl3C3Qy8mpzzwDWvDvc0v5/sdZjkqXwR84Z8ndB8C16XTxsiwksdiqajH24O4D/n
+ lz57+/73+X/4wMPtOANku072e/QL5Lq5ZH5NzNemF1VHATCVroXn/kV5en/H7/iZiDiNTLBdR28c
+ 2qt9R+1fc24187ITLb5TM/3MN2bpwfhTg/us9zB/WHkmf/HdwKM60gECyl4PrZBUvdWoedIXw9qB
+ +qt80uGX2CucOw1ov3mSSbPqHTywoyP93Ufyive4T6DYqj5FamPWXxu9r0W97PNgE/lQyc8/+qs/
+ /VM2fcmZ6Me2/si+YTGXv2X9bxK/LplKkCijmLJ3uIHvJhHiw8qhQb3gMpP2wC9eRDYP/cAH57w0
+ w9uJyGaY4PTXZSi7L0v7zcNCuwws3sCHFdOST6c+LxVvwaivboXrZgUYo7/s7+olphKIX4nCW3mr
+ oI5nHupQY/Exzn24XxiCEe99+/k66i7+VQfdCiCwR+dXvUuHzg8k07n6mc/1Kn7osYSuFLPP4DKT
+ F/4df/Kdiit8MXsadYeHjvTlymOjP/wSrviXH0FwuJ7SoexyzEk8zsNXDiuUvM6z9DSm6hBv6Trq
+ Fwts1weL5yXe1FXxQKT8vl+NI+/KO+u5+f1idsX91Y/0YUWmj/yBRZYHnvN/oqgfY7frk5mijCGR
+ qJVV3c2A6ZOAM37riHj5ar95b8LvvM4/4yKm8408KxGycB8vCHQd1UfZ5Rh+44xXuAmEr/qrE9el
+ DEpgfsDlL56i6D5hJ67xrI9D+lW8G8Um6628amTEM09uocKHrm07XoILP+KV96Ar4pLQ7mqc+zVm
+ /ce+VDZwu3nWnXPOTM7yF33byhNcZsGrT6wvzVP45uZCgo6+y5m+ANCObeDwS3m6zvgBg4M4PWzE
+ a7sccxIPiZtGeVyH9pVn2dzjsA6O6zpO6cGysA8kw6quikeFyav7SVjjWObKGx2ME5DgAx9t7p7/
+ 2PVPuhPPiI98PD4PrD/xAvxt/udfw+ry5LUoo0CJRK3QXHCZAeO+tMTampZ9Af7I77zOn3iptcs7
+ 8uTM2u/ErqP6AFF4la/sxU9/9ag6bcuPw2Unjidm2csf/uI46KEo8DpfYZKHfO1gnpjMs3j9SUmg
+ h/sbfvE7XnzFm7rJmz6czvxJaDc8BlaWPX+fN7ydX6ywM1e88kaHzGQ98Me+CP+Y8oi++mIuDuar
+ /Orbu6rfS+OVn+eMXzt91FmVLXnor/tQtk5MeSod0844JbJCzmN/81RNnKzD4Bn1x+/6YKEvMu33
+ k0fbroOwxrHaqp+7dR6X3y9yXPua7b/EM+JxGI/PA4uFfMVL/9X59fN/mievRRkVSiRqpdPaz4Bx
+ 35JZorYvwB/5nReXIjwVF5z8SLD8SKqEOhXD2m8e+sMrQPvTB+fqUflsixaHywSOJ2bZy188RcE3
+ SfBzFv6Yh3ztYFxMAzteOAI9uO+whdObjaZgJup4wVzXjEtCu+FpIVYersRz6Es8lS5pVz7mLx0y
+ m4iva1S+Hf/Ac/+W84A1+TsB+YsvPPQtnDovG/Xil3CtQ/wIgsP1pK/wyFF+TeJxHr5ykKfiZDHP
+ songUF2LruyFc37iAEZf5nWcLDvslxdAOpTfcTNvdGDf5iP2yMeN83/6yJd8+r+S83F4efweWCzm
+ 2rX/Fk/e+1SXRBkVlu1PAohOGyL1kxq2jwbbCON+5sYN/BSJWcxjvhkXnPNNXgQZyHDDYDtv6lu8
+ ArS/9mUr3ARlY9KlYgLXpQx4GfX5Ngw/3QufONdjerJ0HYxXIl23bDdfxwtHoMfi020U3p+c8Atm
+ 3o4XbNRd55iEdiMw+yMPl+I59KWyKx3jZVf8Dj/ijvyxL8Jz/5bzpE7MPVhP5Q8PfconkDovG/rg
+ 1+wDSKOqQddT5wugebSQAEpH/hnXeca9UJ5lC8K4ImieUT8xzs8ZBvpiJo6170Lllxc2YY2jufK6
+ fnqJE5DgHR989z+63fnH5XicXh7fB9aXf+bPoK4/q9pGE9P2JwEOl36o009q2JbMEnFf9sQN/BSJ
+ /OYx34wLzvmMsx9BvSjNYTtv6lu84ml/7ctWdyYoG5MOlwlcFzG+JOaPn2HrsI96KKp1oIUhPSpe
+ icKbdswX3ss+AV0P6wQ3+Vin+ggP9w66ClY4uRXAVY/Or3qXDs6nLEqXtCfxNzlvF0w5WEfxZ0YV
+ jylP4bt4LgavZC5n6nQHlZ/64JdwElIEipBZeuqhJt4V5zyVjmmpP2dP2on+2leepSf3OKzD4FGe
+ hWs9JPjqaO3L0Xl9b8Rc/Kxiz2cv4qB7xuTD7tdtf+gFPxvf4zE/vg8sVvTxL/lG1K8fwO8KrKb8
+ SVBPaojaT2peNgREyt2TP7jMJB4i2XTkMS4452VY5VmJGG6YtKc/9RnPayRA+2u/tptgwXS4jFNe
+ A/BqG7Dyh18A3bbg5yw86+Vg38xDpdpB3pgGdrxwBHpw32EL537hF8xEHS/YqJuJOLoOrOvNof16
+ WfHkWzp0fobhN9mO9TR+xClfcWuqOpQnuMzkTZ/hz3wqrvCY1lB/o+/ypC9X7jzsQG9mYG70YxMN
+ up7SoexyzEk8TFVlciWC8DrP0pNYDvnFK7qyF875ixc6mddxspSQeNG5DjqU33H9vuBuAX0PBVTg
+ ynP+xuvPfOE3avNxfHn8H1ivPMN/9Xntq/pRnWIlEsTkjGZ3MzD9ZMZaR8TLV/un8Ed+8SGieSpP
+ cM438uiUlAAv3McLEjo+9dkux/DXvvAKN0HZqrvejq5LGZSg65M//MURXWAmrvG5E+oLfr0LBCze
+ pRvZOl44VuSx+Exom/3CLxgW6iN+xh105VbXQbcCuNuj8wtXesLb+bFmuqQ9iR96uMCmd/7iO3U/
+ HlOe8Ax69zf6Ll/qdOXuR/qgE/UjIQke+pWeethEN+FKtzUhasQppxVKXj006jyqJKOat3SVfUJ3
+ 0anSjpNV9WiCp+/XqKfrp7+AxpHBo3U/21696VkQz+MzP/4PLNb1qhf/O/wA/pt3JfJSY/iToJ78
+ aLqf1Lyc9OO3NS273gy7OBPxtYd5zJf4XDqCHO/Zfmz2gvu2idOlUN6KI3DYi7/inEB8BUMfJnRd
+ AiiB+REnf/jpxzjowa3GWz4nZDmM7/rZd0wDk9c4Aj0W38K5X/gFM1HHC3bQlVTRg2teXtpjrHjy
+ 3eScGY7fJ/Ej7sgfW3HBZS4+8YY/c9W5ixv5sfRQf6Pv3k6fnl03cPg1++AJcSid5LnsXpUOM84M
+ eB36Kc+yBVEe6ux8XccpPVgW9ld9s27ymrHv16in3xfJJxbmJZ8H9Tg7P//mh//gC9+YvcdzfmIe
+ WKzw0Wf/KRT/ni62mtKTmU3RzgxQP5mxtqa8BN5v3MBPkZhDfDxM8M644Jxv5MmZdV1O7PjUt3jF
+ o7OZ/PQzO4b66Ql3xwlclwB4GfXJH376MUZ/iXM9J/LodiJGBZB36UaqjheOingsPhdu2/ESrng7
+ XrBRdxpOv6RF3a6jkmBa8SzM8fR2fqx1TpmLV3HBZ3YgX9e4BP+Y8oA19XYC9Tf6LsfCLf3Yid7M
+ O574sdn3pnQouxxzEg9TVXtciSB5nWfpSSyH/HUM0nXUH7/3YfG8xOu4xrMPl+066Ggcy1x5Zz3m
+ I1bjvQ8/uv13MR7v+Yl7YL360/CwuvZ1XbBEolZQAarsZoD0ZOaM3zoiXtbaP4U/iGQ+RDRP5QnO
+ +UaelQhZuI8XJHR86rNdjuGvfeEVboKyVXd1oryCOEHXJ3/4iyO6wExc4xnOob4wMV6JwhvTwI4X
+ jkCPxbdw/uSEXzDzdrxgB11J1XUwDoESkA6PFU++0hOuzo+1ys9c8YoLPjMpD/yxL8I/pjyitx5M
+ paH+Rt+9HZxn5UcnT3/DKtm28z+1/eEXvrfketyndZMfd2oQ/sPzO7b73vK9uHevyKXmk1mXac6A
+ 7i4v7eHX5YTd84lahce12X3S1aUjfPLxqpGO72UvPBlel0/54gew7M7TfmA42l+06Yj7Gk7oeMBv
+ 8Bcq+Job3zRYqJzaEM6ffG7H+x13yNP7O/765NzpEf5V1+Sf/bq9whu+f805nHgIMEq8cwZOdY7Z
+ +Yg8MQ78uScXPrRAobyDP/l27MV7Aw/3oysDyl4PrbBUvQU3T+4/w9ohOqUjHX65vsmDHeHjN08Q
+ mougebCYfXUf4onyriN6mMesro9r9zHrN27tl/3Gh//gp79C6yfo5Yn7hsWC9UO3s6/CATzaYkvN
+ EhtvmojSYiIs4mXmZRUuM7klOhce5jFuxgXn+Hk4iDNQBDlD15F8xs/bZH/tI77LUF+2RVtvQ9fF
+ FL6cHT8ugQrgy+gvcY333egEvNSrftYb08COF44VeSy+hXv6GxZltR7RyYKO+zT0KyU1OQ44/PJ1
+ Ck/0BUz3hP6b3as6P54XxiqHdsVxX3mWTSyH6lCecQ8O98n1AYx9MnHs7wN5ta08rHvhyLvyRi/f
+ Q+zjh0Db9oxXO/qJe31iH1is+1Wf+f14/RsWiVpZ1d0MgD4JOOO3johi1z5VPOLDB4iG/IhonsoT
+ nONHnpWo4jEhoeOTz3Y5hr/2hXd+5SlbdVcnrosYJ+z6ZIe/ONIn0bpUnsXHcA71hYnx7WC9MQ3s
+ eOEI9Oj8O37Hi694O150B11J1XVgzVtefM5Cc9Rx6EtlM4w0mU/hR9yRP7byBJeZvODb8d8sT+Ex
+ rcF6ii88dKYvVx4b+uCXcNXH8iMIDtdjHWOXY07icR6+clih5HWe4jHAKNXLPKJznaf0EN3qqHVS
+ 3eQ1ad8v5Q/vyjvr8VPu/G88/Ad/wxPyg/bR5uP0Hz9PxlPr5z3jT+N/QvZ6uvxJgMOFmFSnn9QU
+ l378tqZlT9zAWyQyepjHfIn36ZExeT3bj81ecN82eXQplLfiCBz24q84JxBfwdCHCdMfkEpgflqj
+ LsZzjP4S13jDVx2M7/rZd8w9bz4BnYDhQ1ds2na8+Ip35VdhN8QlobLxlrOAMVY8C7vJOSOm9cRa
+ ccFnJu+BP/ZFeO6LN/yZi2cXJ/p9/eKv/OEBzPVxsTs/9Idfs4/lBxQO11M6lF2OOYlH7F0OF0M/
+ 5Vk2sRzuh7Poyl641kN0q6O1nzzFhzxqiLP4ae75tM+Kz7Y3PHLfp/33Aj7BL0/8Nyw28MoXfWi7
+ 9szfh9bu1pOZlwmXger2k5qXA9BIyX3ZEzfwfBPMYR7zzbjgnA/8zYtoA0UjOtj2pz7bAqqO+Gtf
+ +Kqi/UVbnbguYtxZ55cd/uIY/SWu8Wk3eRjf9bPemAZ2vHAEeiy+hdObjaZgJup4wQ66kqrrYNwU
+ gs7Vl3gOfalsYHYz+RIXfGY75O+XS/DdJwIuzQNM+t3xV/7E07dwo15k8DefU34EgcD13Oxepc7w
+ phLaFcf8+OXzMq5ROQ9sq17ZIw6290m3OmqdpCfxZmQeBXDGEB020n9mPK3ufmS7/srt1WcPO/KJ
+ fX1yHljs4cte+Pbt/NqX+5MA4lE0iijxqCHEACxStj1xA2/RlzjmMZ94Ki44+XkGybMSiYRwnZH8
+ qc/4cgx/7YuvalA+7guGPkzouohZtvukHX4tEbjXI373U5jkIV87GBdzz6s6eNs6vHRmAIb1cLzv
+ polSt2Guy+kcl4SyyF98lUa84T/2JR44d/Oop/FDjyN/bNUZXObuK6qP+VSewrPeHsQVX+qkL7qY
+ MTb0wS/hWgfrJBOOpTN5V5z6kB09RpyKoW39lV95li0IXqwD5+IZ9SsOtuuDhb6A5HbVVfHKo231
+ o4DGkXfljQ7Xr13/0u0P4L39JI0n74HFhl71mf8ET+ZvoKp6QmeGnLO7BAAAKHpJREFUi7aPxlK2
+ TXGDy0wuic6Fh/jAcIwLTn4eZvLkzIpHU/tTn/GqiID2177sLmDCdLjccF3EOGHnlx3+4hj9Ja7x
+ DOdIHYxHfvfHPDEN7HjhCPRYfAvnT2z4BTNRxwt20JVUXQfjphB0rr7Ec+hLZQOzm8mXuOAz2yF/
+ v1yC7z4RcGkeYNLvjr/yJ56+hRv1IsNT8RsW5PmGR3//p39Ef0d7632Liyf3gYWirj/32p8+Pzt7
+ vT8R9k9sXoFcDn8i8b2AHVzO3czmuD+G/Lg2x7jgHG8+5VmJxCI6OByffJU/b/r2177sKkJ1ch/l
+ Yuvpb1g+H+t+k3OGVtKrznOHz7lT4vJzqXEJ3ufIc8h53CQPMMprZr/qIMd9Kt/Cjf547/Br9uHM
+ VbbuST3UxJt8cqiw2q57U3HK6Q6S13mWnilZ/kXnfoZ+9Ls+ROhhv+r3fvKYUfeXDinoeuY3rOvn
+ 19/w8H0veFJ+buWK/KqS5saTsv67P/Jrz7aH3wQtnn+zT0KJTHHr8vR8okjzHD7pEge8/Dw0HqJs
+ vPTCZ2h4XT7lHcCyO0/7geFof9H6etTlEAAvI/8NfmJcZy3aHmWOPHV0zDt5ZZvHV9B+X9Kxv8O5
+ rr0e4VcZQ7fsY0aCohkL43evOYea+80DUFW3n3NOY7a+7OjEOPDnnnxYeSb9RfVyvyvGsuz10ApJ
+ 1Vtw1+OHDXWjLR7lMY314D0uWlENHPd53vjleAH8cuSBnXtPQOuRvDrw4z6BoavFDneG/2XZ+d0P
+ nz/j5dsf+HVvN/LJe33Sv2GpNfw86/xR/wfSEb3FBMCHlUODaDhd4TKTRKKLTS/mqcsgt+OCc/w8
+ HIDGrcgZuo7kM35/qWZdowwSgK8mHDEPe16qZSvtuARqgC+jP/dzqJeY5GF81888MZnHcZqFI9DD
+ /Q0/An354RfMRCs/4w66cqvroBuBtMdY8eRbOnR+huE3ozRXvOKCz0zeA3/si/CPKY/o9/W7v9E3
+ a8BQPq+GDRx+zT7cWZUteaIz9QiPFmVHD9ex2rVCyXvRw8o6OF/XMfRrPUQnRNffeBSWvLq/dOiE
+ wms98D+Dws+tnvyHFau5PQ8sZv6KF78GIn4zPwE4/M0lhzZs+qGicJkdoLi8mMc4n4nj9GYqfmrf
+ eZi2D4/7tu1PPuMFVB0znvwVxyLaX7S+vspHNwD4PeqTbT65+TL6cz/2jzJXHsa3g7wxmWfx8mHU
+ t7D2HbZwehPQpKN4V37ujbolFGHkFdr82SccY8U7/7SVH5jdXPHCRYfMJuTrGpfgybPjR6TsU3Hw
+ pb5OQFzlDw99C0fBYkMf/Jr88JQfExyuxzrGLsecxGNeheOFPBUni3mW3SjVyzyic51Dv9ZDdKuj
+ tZ88ZvS94Zr74VVm/NzqBU/qz61UQL3cvgcWC3j4k/4Y/ln4h7nsTwCsrSkvgfd5Cv4EqdkBfO0h
+ PyKaB4fl0+PhhGfkyZkRJz9ekNDxyVdxdgz/5FM4N8qvSZeKK9elDErQ9QGh3covI33CSFzjDV95
+ GG+BgGaemHtef1IS6LH4Fu7pb1hL7+hkQcd9GvqVkpp8TsDhV99Xe8qP6ZbuVZ1f3wuF44XntO6R
+ 8yy7Ucf7J3vh1rmTTpUqdO0njxl9b7jOPdH+Wx++51Of9J9buSK/3t4H1qs/+cHt2jN+H0R7sJ/0
+ JVHb+sioTxQcAvc1MtuqffslfcX5cHhGvDWe7e9Ei679yVdxDjzEh68LKL+PWJ9QiOt6fZ1lK7/s
+ 8BfH6C9xnLtewtKH3gVlK0/lpV/bNQtHBo/Ft3B6E9AUDAvMKz/jDrpyq+ugWwHc7bHiybd06PxA
+ Ml3SnsSPOOVrdgYysuoMLnPt7/ixJ/tUXHgw91B/o+9ypE5XXvmpD35N/uVHIBzuu3QouxxzEg9T
+ VZlciSB5nWfpSSyH/OIdfZ7SQ3SqtONkKSF5te066FB+TQ9i+cVP1p+3chU3vt7eBxbr+dIX/TCE
+ +Op+0mPLmvISQHyIThV3M+O4P4b8iGieigvO8ebbnVnxaILD8clX+VmJ+OKv/dpWGe2v+n19xecy
+ dS2KH/HyF0/6SJ+w3Y/nrpe45NG7oGz1vXQzzPr4k5IMHjt9sLX6hSEY9Z75GXfQlVtdB90K4G6P
+ WX+fH7ydH2umsyozH/NH/5rJynxzlK08J/CPKY/oT/AXb+pk+vTlymOjTvwSrus0n0zJQ3/6WnGt
+ I9smvxShn9k4uFg6OM+yBSGKAcpTPLIXrvUQnTMxdu0njxl9b4TQBrxf/aFXvuCt9t6+19v/wGLv
+ X/aSb8InxDdYsjoiXha4/MnBJ79PQzNjaI/hfeNmXHCON5/9CO5F0cEmTpdC+WwLOOzFX3Gso/1F
+ W9ev6x224mWHvxrBJQt+zqPMlUe3s/Ki4kp/Y7xwZPBwfyuvbcdLj+Jd+Rl30JVb6Vdu8NMeY8Xv
+ z63zA2sdaq54xUWHzOQ98Me+CP+Y8oh+X7/7G32zBgzl82rYwOGXzyk8nlW25LnsXkWPEdd5XAfN
+ 2/ENCz9k/4aHX/kbvknl3OaXq/HAggjXf/rFX3t2vv29vsS8rNj3JwcOGzYvkWaKRnsM79s/44Jz
+ vPnsR3Avig42cTf/JJx1jTJUn23R1tux6x328jtftzH6S5zrOZGHfF0/+45pXTpeOAI9Ft/CuV/4
+ BTNRxwt20JVU6Zdrvitpj7HiybfOrfMzDL8ZpbniFRd8ZvIe+GNfhH9MeUS/r9/9jb5ZA4byeTVs
+ 4PBr9uHOqmzJc9m9ih6uY7VrhZL3yf6GdX5+/e89/NZP/Vo1ewVeqPHVGf/u/Blnb3/Lt+Pofrs+
+ IXFq+QTtNwXfHCeGP1HrUsC/iytbl5hvAtl46QXxvlzNo40BLPtGPzAc7S/avB25r8HZ/fgK2lZc
+ ITipbi+063ymrw0l2OMGb+XruEMdvb/DRWcV0Hm90KvqslzVz+i3Agw89cp3n/CeL3yYIFaq1Ln3
+ ec34W+A/9bAptaPGxXkmf+W9oV7uNxOWZa+HVkiIwyi4eeohiIJoi0d5TOM6eY+LlvGLoCz6zaON
+ vBx5YOu8lcf5zLuvv/srnNIx6/n5az/4y3/9Fz4e/4v5lPiRzlfmG5Ya+YKzR86vP/OLsNafhG9x
+ cdl1uJkJjrgK9GHw8Hfi500iOA4JhMuPzXErRNf+5Fu8ytf+2pfdBcy7hzP3pVDdgixbaetWLD/r
+ WZcw+7t6ycNCmZfxIgpvTNqpOzgCPRbfwj39DWvpFZ2s87hPQ79SUpPPCTj88nFYVyrPcev3qs5v
+ xpkBr+NeKM+yBVEe5OO98ISZi4Vb5046VarQtc96idc2/3Do6z/4rE/4oqv0sGJl6ya7zqvx+s0/
+ 8svOzh/9Pqj3qfomUeL7NE6X7G8O9cnDxvKw0uHY1uHwEOXHSy/or8OW359Myw+gAMXDS1N2K9j+
+ opWj4qSqL4nrBM8NfoHMyyX5NI16vaEEyh+76nE7h7hDns6/45/9rHjSK5wT8JNf9e3ac5xiji+H
+ 8+s3SdGLF+uegVe+MTsfESfGgT/35MPKM+mL9wYe7kdX4steD62QVL0FNw8fCuyPYe0QHU2qeJpH
+ ASK23zzJZIcJmkd5Fq77SF4p7jqUV/uwr5//xAef8azP3X7vr37C/qrjXd2Pwbha37BSOP5OaBzK
+ F+JE36lD5ZtFp7DEx0bQmnP4u0PxrVj+uiQ+HGz3godm2/HJ58Oct2nPX3HMoPp60qVjnOsSQAk6
+ flwWejXSJ4zENT7tJg/ju37miWlgxwtHoMfiW7inv2EtvaOTBfX5SeahXympyToDh18+DusKxvJj
+ gsO6130ouxxzEg8DdR/NIIJ5nqf+cVB+8Y57cLhPro90WpkdibzPelHf9e2d2513fuFVfFixYNZ6
+ dcffeeuLz+64/t2Q8eP7E1Zi31iyvwFQfF4engla07vYLe4+seXHi4EiW3DG8xOQ8QNYdudpv8J9
+ CYBf28xbG84g2/EU/ug3j/JySSJN6UdmJ9jjXK/LPcQd8nT+Hf+xX6Yxj8J3dWQf86692q8yd1PO
+ oWa/eUteAFX3nPkmQn7harawRJ4YB/6c+4eVZ9JfVC/3oyvxZa+HVkiq3oK7nvTFsHaITulIh19U
+ U25RDVz7zSN3XoqgebCIjoS0HiJWBkVmH3+I+77za3f+5g/93k/5oVBetflqfsOKSl/xGT90fn72
+ RfgbDR/R4eby0i/RA/Rh8NQjfvDB2Z6HhthxK3KGjq/LAL/iCCSgbIfFrhraX7T1NnQ8Mb505qdF
+ O/xaInBdwsQ13vBVB+NdCIIZF3PPqzzgzVh8C/f0N6zDOVAsCTruUwmYcwFAO7aBw6++F/aUHxMc
+ 1r3Ot+xyzEk8DGR6Dy4qDivnWXaBxO88oit74da5k06VKjT716+ff+Ds2h1fdJUfViz4aj+wWOGX
+ v+i1+Jr6ZVD5Ok9xffJT9DW8X58o/397VxtraVWdzzn3zgxQgQEKSP1ATSWhlaaJ/dNUrYxNTEYL
+ CRoxVRtpy4/GtJo2TfnRxFT7o/5ppLRNjKlaCtoMatKG4h/LxJg2/eE/EezwVQlWKylcYGBm7syc
+ t896nrX2Xvs97713mLngPTPvvtyz9seznrX22muv+56Zwx1Mc+xJZyiNJZkCcWZ+meMMDceksAkA
+ gxedMpZ+jN2Hsk4YckcGpE8P8JL843rwO0faX+jJH8P17DA7MccF43W7Diz6xJnHapVPhBpL3/YL
+ wrJPjghLfocjblDLDJQM+GuxT5z0xWfxpZVWZr8jDiGl6MwutsCXfQLe2BvSAyb8LUaS36FvaxVX
+ 48dzRdyIc37t0PBQwoL88Tj42BeyAEvwhiciCLv84QLCGBcU/ZW94keKn+yHP0RQ1ebn3WSOSnDz
+ 0ZvecH/w7VS58wuWRe53rrPPZ/2xnb5+omEOh5Gb5rXOI7Z1HqJwXMdC/EShuoCkibHW3Y7jQSR7
+ jb7xJzdor8CQUmY3+ZvGUPN1w8s/OpH2F/ONvwYKO8ZHorATQ/EVfeIMqFb5Km58wuqdg4WKcU75
+ lOLnkaRQnIHDl45DcQWBr0NgQXHfLK/8/LKeGEiQz1PnFXYIUh7RTsqDXj7JP6NjT+zMv+kfHnvf
+ m/5FTDv7dTkKlsXwlrd8Fn8i+Jnyk4VBr8HVvH7y2FFy7ElnKI0ltY7J0qlnaLjxCcvjxbhZ9Hpx
+ tSmLP+JnqR+X27rRdB7Og3PIY4YdwEb6eeqcHJ/0aC/ITW6B1znKvy3tkI47qRa4v7RvX4l9+M59
+ X8Dhi3bcr7oORSzIn9iXxr6QBXnMVKFhhF3P5mmnjg1rTXGTXvEjxa/Eg7UqIkLVPz/2/jf8LXtL
+ 8NI7pSXw+AsP/NlsNvn00CXhKfsh5UMrp8/D0uFqHfstHfSbdSUF6JQMyro2uZBVWve4MclT0iwk
+ lwyU5Cnm/Rio73bNHc/agndY7IfryX83X/QanLtoovKJsI4dVPaRL+GiXw1/EwiEKc4hS9DT3SEZ
+ +kPS3WqE47bFTkPsg8yPKfObzeZTK09OQPB0uc5egetYbd3yBfoCYh0dH5fpsFTMWEd6Yk08fT8q
+ ndvxCeDkp8yFfTB98uhNV38q0ez4rsVg+doXH7gVSfA5OF/816Xzw8SOdJktOQThOrKiuZzMhVj3
+ 3AE+P2FJ33ko+usePmalzJFWWVHsowNgsr+wLh76bd3sdx0WAy0u8fb1enbq/mPf/f3UeXqkYYpb
+ mbDthJupQ632hbcRei7L5bF94VvxSjLOKUkaMv2h5rzBH/K07GR+513goR/hORR8XN8WBon76xsU
+ jxcfhqMsMBA0Z3SWJ0FLqoQr6+Lhcrw4QeFBh+dtE2ixD3woFBOz3zt60+u/EKrLIi0uy9m++MAH
+ 4fyXcD/3lA2waPQuL0/Pt8kssLtll9TvWOnYBL4x1noctsa+kIUng/OYE7RfBOhSMbB1N1DsY0bm
+ wz/ISDJDG5/L7CbtxHpacPNFr8GRSS/F/mb8JQ7hRy+uRpX0o0iEGV4OrDcSi3R3SMY5DckgzdJx
+ DX/YG+LHHI/3dPidj+ZNPzXaj3PGvI3dElHVHOJnOE4QKJyPyzQjZOtU947HkaPEExCbB4EdR+Fh
+ xydiHRLF6hj8+MiRm66+J6kvTVc3Ymnc7Tn6Dw9cP+26r+MA/HfD+2ECxsseh+ZjHaoVEb9rpaOx
+ 4LZuh2+HnYA+1mXP68BYK+uupuvhhgjAi/RIu7BuGIP7kbiUvUwjv1pc4u3r9exUPtnROO+nzssh
+ vtIvhSP5h4lwt3aEb17jHFzyHABQHAYkcPQrSfKb/lBzXt1Wi48cOy07mX8jHvphcXB/fDz8ZOQw
+ uoV94asWF48A7bjbhBtOYwjr4dv3xVHlsdXS+jwYRxwNg3/M+NlusnLj0Zte962is2Qdi8tyty8+
+ dN10euKbSIQrIqmbS8lD9G2aYC4PFy1liYpGFC2pF0XX9xTCbc30sq9LrBRLxY9RFk/xT+4wqbSM
+ 9UgyTBjOWsFrKAOxTkOBq365ootQ5DDxhV6Kh0HMbhMnw3nyc9n5kn9tIGwb9bLQfxuTBfRDMgI5
+ JIFfaI7bFjsL5JjI/DYMjM2npqLo54x5G9cdFhrFAzvnOgPgONpxnOmHpWJGONnRehS95Ib4wet0
+ bscnjHc+f+rk6vT69Ruu5m/4zbrL1F+evyXcKKq3XPvdbmX2a7hkj9hx1ssdp2fZYXfQxpJMAZsu
+ HZvXWPriCbwr9vSDz9jRCr/T+rWkXQHwmvzjuvvFdVt2u+iGnvxx/wwXdky/+G96MbSNJH3iDKhW
+ +SpOxRnrhImo2je95DcDhanihy1DMeYNjlb1ja/dF90GppGuT73AhxShvda2Bb7sExpb2gEm/C0G
+ uL+0b1+ouBo/s8Ai0vDEOiYZnvTDy8e+kAV5zFQNp/Gk+AERP0wNF01xkx73m/3vJo/Op7NfXfZi
+ ZXtVVGPXyyzvfOSK6fwYnrQ6PHHZoXoS5suks2dyah2g0jEFjaXvyYp1JSk7bXKZHZ+GpgHb9YXk
+ koHin8w5f+inS2J8nI79cCg7Pp/9d/MtX+Bc1USxvxm/7xvC+Rb9wgJZGZ8mEAiDF5tGAk2+IRn6
+ Q5JWei+Oa/jhT30Sgd+naqdHzWHmd54yn/CyZ+eD+BjO9IplG1v8PB6WD5wgUDgfl2lGyNbxzSa+
+ ui+z4zwBgeQ67bh1N4z57xzZvWv/ZP9VTyX40naX/wkrQv/bP/+TrrvgHUiOgzxiZYmyBRhdKkmt
+ Y7J0HIaxLrMup61TTx3ite7zXHcHaE88pPWklb5hdH2KPsfB7xxIssBnST5TtxZ2TL8smF4MBSz6
+ xBlQrdg3BTSNpU8+5y36hMkvmZNeGNQyVpxPVuq+yNPbF3kAbGTyx255X6/PH+MGF3plX/DjVOw4
+ HqI2BjTt21ciLmKOfQKHryY+tGzrUGR4vKiRt+oRwPWIh+Lr4YCyCMKu7Hh8qregAS7zYIzfuvCt
+ Iyurv362FCvbrqKTNr703fse3jP7ybEv4QQ/GMmsrPHdluSxpMEcxsPrnqyOz9nguQE1+0lX1cnj
+ Y9IyiXNy1eTTepj3YyCx26Vbmqed5Gb4G0laxwN8iQddtsq3CT/3kS/hol/VLniaQNgw4pckrDf7
+ zuPQH5Jyu3113LbYaZk1yvyYMb/ZbD412o9zxryNkRHWI8ppFA/DcYJA4XxcpsNSMSM+2TFWxDN4
+ aEEvXI9jwBT+R+Z7jux+/Ucm+6fHEmzpu8rYpd9GbwP4P6Zn//jQ5/A3iLfmS6VDjUsIHeaCQuC1
+ AnBb1yWr6wASYKK/7rbLutNG0to8m0m/vOhZ0hFZ1gkiP3s+X4uL1rMfFZd4+3o9O5Uv9t3fT50X
+ v+z29aofyS/vLoh6axnHevksDhGvJIGnvSRlr9zi1kSPH1Xh9O1k5o14bL54bhvQuL4tDBL31+Ha
+ N+INvB2TSfKY5DhYsR60pEo4m7c8wpf0CdCL8+CjC589csPr/ghGTPGsahaXs7at3Png7+JQ70Dy
+ n89NMguQFMgWpoCPmT0GaNbj0gjP68QsszFH4rFcMz1r/XVLKkuuCgDIeQ3Okak5AfXTus83/kIn
+ DFKvbKSYb/kIDwdNWfakpvlBfu4r4iRPKy7563y8tLEPzOlS6lJRzy6pzRt+SPKyYWVIAr/QHLct
+ dhbIMZH5bRgYm0+N9uOcMW/jusNCo3gYjrwECufjMh2WihnxyY75YXF0HjBEk93JEfyDER8/euPr
+ Ph/zZ5v0zDvbtpX2c9dD181Odl+bzaZv5qHjUjEFyq1RCLxW8LKXIpOBBMRlV1ERkdsq60ruxSco
+ GSyXHklHpOmlZutsLgs+YMVOxm1S5Hp2Kp/0Nc77qfPyI9yJuCW7iE+4WzvCN6+8jdBzWS9fvdrG
+ qqtpMN9PkuQ3/aHmvMEf8rTsZH7nXeChH+ExFHysYuJD8ri/vjHxqNjwGIOHdqQnVuw/aAsPZoiH
+ wFe/aIH70RPT1RvXb7hqqT+2wO1u8nL2/KH7Rpv88LXfnZ+YvRWfQ/mqZUFJvno7lBIDSRV4XquS
+ VJFMUKg5VLLNYUwqA5g9NRko9nk95U9xHVkc+Cyb5A0/TL8smF4MZa/oE2dAtWLf/dJY+uRz3qJP
+ Ovklc+IPg1rGStlntWM98vT2RR6sNTL5Y8Wpr9fnj3GDCz232/CHvSE74SdkaYZzvuCxtYgLeoRq
+ DH/xRVyJQ6wDhgXDlSLjY1/IgjyyQ3q8GI/Hg6PEE5DJ5K4Xj+355bO9WNl2LcbnTLO3iEjCO7Dh
+ 85GLngsKgY2Vo558nAAmgD5efCLx8JX1SDE3QH3DaFyecHwsfucwVOBdFrzclD+8Sz5BHJIYkqnd
+ 1+vZGcZJ3wnoTPVDvvX16Df9cN/dro9a4ZffA+yXN+I0IO1y236SlD3b4UDr8Z+RnUzvvCo2yU+b
+ j7ga3se1aAWJ++vwXLQsXDYmD+2IRlliRclpSZVwNo8vK374g/Wj+F1Wt+It4F2EnQMvnvXnwE5j
+ i3iLuNp1X8ORv5l3jLngYVC2+OWPSxNFBEBmWRF+qTQmfX8dScXkoiFDyEC5/JiR+bAPieQt665X
+ x7RSDNq8E3ChDhOfWS32pV/5hKvjxM/t5iK46BfjEfy8dG4Xc7nYkN/2ZfOGH5KhPySBX2iO2xY7
+ C+SYyPw2DIzNp1aKEBDcF9djh4VG8QCCRUpAsKBDO0WAxfmLGet48UavFKv5/Jx4C5hCze7Z/5aw
+ v2O8RTyxPnsrcuBLzI1SFJgNXix0OW1dl50dyxZf93mfpgmvFqVoWDJCQfqGqGOoYWRj52EPL0je
+ wGdJvOCmID9MvyyYXgwFLPrEGVDN5qVWcSqqWCdMREWfMPmV9cKglrFiDqRW9Y2v3Rd53JxpZV7q
+ BT6k8fb4Y7wR3uZP2Q7pW/+1v7Rv8wEt9oVeGgOHr7yPug4YFuSPx8HHvpAFeWSH9HhRhMKuv628
+ 68WLzo23gBGFkL1TiulzQ67c/dBH8cth/w7JMPC3iJ6smyWXXQque7zsUvkYwpPYk5SQmnxaJ7xe
+ Auq7XdO3sUvi47TSvN8Sxw3wuT4B/qLLswU/95Ev4aJfcNDtQjaBsGHEL0mgm33ncegPSfe7EY7b
+ FjsNsQ8yP6bMbzabT+2VfMKCF0dPTrtbj7733HkLmELNblyB/vy5M7a3iJPJ13CJ8beIusTlbRxv
+ l4WICy7sEusSMosjgiw2pq/k1hOU65VQp0vvPznj0kfAo0jFfC0ujih23DCLRuLlOPYB2bNT+aSv
+ cd5PnS9uo9PXi426OW08NtGXfvmjqOmSR5wGJPC0l6Ts2YEMtB7/GdnJ9M674K/NR1wN72O9XfMh
+ edxfh5fiBjyPMXhMWt5IKL+C1nnw0cLvTbvpzYfP8r8F5HY3efGs3wRxLiz9/fcvXNkz/Wt8zu6j
+ VoTyJWYSeVFQluluMgcxr3UPErMwrSMLS3EjxMKdigtHsqdlZW2/ONQxUTKArs2bv/TLRfhFJP1O
+ /JzUOOMG+QGo8z2/nbfald8xNjO8nMA10ubxLbaejEAOSSPsN8c1/GEP2Jdkp89t48zvfITZfGql
+ CNm5Yt7GdWeFRnGwfCAvgcL5uEzTc1vHNxsz6K4XfuaqW/GvMB+N2XNVWozH5hFYvfP7+6az6R3d
+ tPuFthgge+ySUqQiFLlp+mVdudZ/sokkLkUgrq3ppUa7Nvb5gg9YseMTxKUi2Nfr2al80tdY+rwk
+ SZ9uFTO2b3OrTHg8iCr++qgVvI3Qc6lLHnEakMDRryQVX/NgoPX4z8hOpt/IX5uPuBrexy/DE9aD
+ k9nsDw7vf/WO/9dscthezr5n38tpYsm4D3aru/7nvz6Gvy7+FD5sepFy1sKEJLXLWoVfqnRX++v2
+ ExVf5ZJ7kqtI1JQv69TfpPjEaXnRoB7vjhbcfLWXcekYiv20nmjSPnORWvSL8QCv/PDAuB0WJfA3
+ UlEsV92jqrEXhyg2jUy+l67jG/6w91LtFNLUyfzOx1WbT03F188Z8zauJ6taZmEuOPISKJyPy7Ql
+ 2GTyHMafxFPV3+Cp6oRNjE0RiCswxqMfgQOPv3rX+vrtk9n0A7lK6bJ7EYrcNF1mZRG4hH4dbZ5N
+ 41IsFtYdFXiXBV9o0IFdFglTIW6xmBS9np0y3/Dn/chQ5a9+cbvZP/qhdfnh/b7gbTS/paDLy22E
+ d60Ejn4mSX7TH2rOG/whT8tO5nfeBR76YXFyf3y8HU9YID2A3wr68Rfec8WPsytjXxFQdo7R2DAC
+ q1/G28RudscEbxN1aVJRwuWNu0ICu8x+iS2VrWiNT1ip+CAmisuAjEAOyaHTcRyLCc/hDOxsxe9+
+ E2Z2U1Mx25YnrAeRX+PbvxTboe5YsIai0p+zt4n/+/DHupP+NhHXjk8gcfsMP1CsSvUin34ilyec
+ eKYwvdTqk43mCz5gxY5PUN8vK3hCv+j17JR5t6tx3k+1S7eKGbuUlT/vV7hwkKP2pVeEFp5YjBff
+ Ec6hIiR7bbEoRnr88VPktOwUUnMI9hCnBR4WrfDYcfHDKYbkcX99Y+JxPqjbGAaem8/n49s/xmvr
+ l02ybGvlcw5hbxNPrN+OS/4BphqTmTmtUFgRwIILpLD/5LUJNpOpuHBkeF+nYlr3+VpkxEID6FJP
+ jnCB6jFvM0mfAH+pfLJbxwXg+8hFatGvhj+KhlPwcsJ+I7FGd4dk6A9J52yE4xr+sDfEjzlG/3T4
+ nY/2TT812o9zxryN3RJR1RziZzhOENfBnzvn3cpt49u/FNAtun5TtkCNy00EVr/8yL7ZrLsDtw9v
+ E33Jq4ULJqff+gBApktPRSS3KaTGImRjn9+smFCNuMTb1+vZqXyyq7H0VU3qvPj5Cndy8XL/svtu
+ V+jea7213JcuOaMT3rUSeNpLkvEwnqHW40dVOH07mX8jHvphcXJ/fHyqf4aF/wfwwenK7Nbn333l
+ f2RzY3/rCLS3ZWv8iIgIHOhWdp98+Gbk6p/ioxC/xGm7tH6JLZXHJ6zxCctrnp6suul38fuqPvPC
+ 4Sv+afKB6clIpVGeegTGgnXqsdoQufKVR9+DJ67b8Psd3+Y1i8WqVC9q6idyeVKJZ4rek4mtCy5Z
+ 8HFSpShm3PiEVZ7AGJ+d9YTVzSf/PlmZ/eXz77783g2TaFw4pQjENTgl8AjaPAKrBx59G35Z4G3T
+ 2eQ94xPW4ts6SzbFZUDWRxG9Hc7jobD7Ot9eokg18qXa2Yrf+Qgzu6np7a29XcZ+MW9je7a2HaJ/
+ 34lu8ukj+1/9n0ll7J5BBMaCdQbB20h194FHr8P/VH0bPgpxM56QVoRTEpcnpvEJa9PixTjh8ocs
+ T1BDQffihQrRFDsVk4HiCI4oLsEfsqF3vgUem4/zMwUfW9HCf3ir1x2Yz1f/4vD+yx5s+MbBGUdg
+ LFhnHMJNCA789xt3zY//yXQ6uwUPAedFkvNyQE0lDNLexljztzNl3efrWDDiCIce7470qR7zBk36
+ NoxW+UIv/kDdEU7U4hbfdjb8USycgpccPI3EWr7qZr2MQ39IOmcjHNfwh73Mi/6WdhpiH2R+5+OK
+ zaemYmbxm78AS59fX9312aPvuuQHCTJ2tzECythtJBypBiLw5ceu3L3afQIrvz+bTi/WJfVr5EUl
+ tKxIsLmsRcMRNg+CFrdYTIpeXNcFPtkRTvqqHnVefoQ7UdSSf/Qj+eXdBdErQrrkqVhBwVgVF0jg
+ 6VeSLI69YlHs9PjjSeu07BRSc0gbXOChH/IY3afxj2rdMb1gz+3Pvn3vM1l97G9/BDz7tp94ZByI
+ wIFHL95tRWsy/QQ+hHNluaS5SOEGlGKzUGScM823RcYvfeaDivHldkr8XhRZRKi/6BeLSPD75Q47
+ Q0Wn7BegUpxM38ahPySDNEvHbYudzBv9zI8585fN5q11kyfR+6vn1n/2c5PfnL6oyfH15Y5Am8kv
+ t7WRv0Rg99efeMvsxPEP4dL/Fr5fHwuluKSixMsdJ2XzmGhxi8WkFKV4dlngE6Fw0lcVqfP0ye0W
+ PudhsaIf7nnMx0ay7BWhhScWYM0M92kSeNpLUvYMMdB6/Bs+GUF1SzuZ3nmTv0+A4CsnT87uOvzu
+ Sx/I0LH/ykTA0/GVMTZaGYgAfjPbeV99Yt98Mv8Q7vz7cCAX9YtDHbt+Kj5tkfFLH8Uj45Lpyqfj
+ r+PE70WRRYQ8i0WRRQQqph9FIswMFR2zVoqS6eXxBkWHvEGaZS4msN/Yy7zob2kn80Y/+OfdM/jL
+ k3tOzlbvPrxv77ex2Q2qZiiO8uWMgJ3l2HZKBA4+ft7up2fvXem6D+Et434Ugt2bFRO6vUExKXpx
+ XYlTcalFKMbjExbjZUUKDWIdJfAb+Fly97OvufSfJ784XefC+PJTj8BYsH7qR7CBA/f+4JLz1ifv
+ xyPJh/FJ+rcDZXdKLRWf8Qlr8e1j80RlgcN3ebLzJ6fyRBhFChDgvg22uyd7ZveMf4DuubbDRFyB
+ HebW6E4Tgft+dPn568fxa266fd1sdj3elOCfKPOjo1x8uzY+YfnbRASyFCv0c/HCb0l4ZDad3Y8P
+ Th2c7971b4ffceFTTdzHwY6LwFiwdtyRbO3Q+fc9+dru+ORd+Dfa9uE22q91fq1pWe3Kb/c4wfn2
+ mEsxS09q0nPbTtTiFotiwx9PLk7R/JkS+NIfXDdFoxST0B+SztkIx700O92T3by7fzJbOXhsNv/m
+ kesve7LhHAc7PgJtJu94d0cHhyKw51+fvGblJJ68Jp0Vr+uBubwUmygPC8VJRy+cipFXO5qweXVc
+ WNFBt85jHRMBqx3hm9deETqV4kW/oBeS/MYz1Hr88Xavm89/BP2D80l3cNKt3P/sb+x9bEh9nFue
+ CHhWLo/Do6dbRAB/Uvyqb/z4WvwPItdPZngCm3TvxCftL81aLAKYMGmtjjlUFWIxykVq5z9hwfGn
+ 8Ynzb+Fzbvcfn3YHD++77Hu+o1GcJREYC9ZZcpAbbgMF7Px7/+/nVqbHr5lMZ9fgf8y+Bk8g7OPh
+ 60345P0q/8ddK156hCJVFDM+oGGmFjVPGce/0k9YcPEE/Hwcny44hBJ6CNs7BPcOrc92HzryzvN/
+ CEc3eAzbMELjwhJFYCxYS3RY2+4qfvXzRcefeuNkPkUBQyHDNxLiGpSna3DrX4Nixtuv4vXKPWF1
+ MDvt5j/sJrNDkIcwftgK1ImTq4eeX7nwsfFfktn2TFgawrFgLc1RvcKOfqfbdeHzz188Pd5dMpkd
+ 3zudrO6dTE/unUxWICeXwJu9+EN/jPGNMYqa+jEnd9cg1vBEt4aiszaxD2HOpmvzebcG3TX8Mru1
+ ldnkGfwt3RrK4Rp41ubT2drhC161NvmV6XFRjK9jBGoE/h//xb5CiJqhJQAAAABJRU5ErkJggg==
+ installModes:
+ - supported: true
+ type: OwnNamespace
+ - supported: true
+ type: SingleNamespace
+ - supported: true
+ type: MultiNamespace
+ - supported: true
+ type: AllNamespaces
+ install:
+ strategy: deployment
+ spec:
+ deployments:
+ - name: clickhouse-operator
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: clickhouse-operator
+ template:
+ metadata:
+ labels:
+ app: clickhouse-operator
+ spec:
+ containers:
+ - env:
+ - name: OPERATOR_POD_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: OPERATOR_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: OPERATOR_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: OPERATOR_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: OPERATOR_POD_SERVICE_ACCOUNT
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.serviceAccountName
+ - name: OPERATOR_CONTAINER_CPU_REQUEST
+ valueFrom:
+ resourceFieldRef:
+ containerName: clickhouse-operator
+ resource: requests.cpu
+ - name: OPERATOR_CONTAINER_CPU_LIMIT
+ valueFrom:
+ resourceFieldRef:
+ containerName: clickhouse-operator
+ resource: limits.cpu
+ - name: OPERATOR_CONTAINER_MEM_REQUEST
+ valueFrom:
+ resourceFieldRef:
+ containerName: clickhouse-operator
+ resource: requests.memory
+ - name: OPERATOR_CONTAINER_MEM_LIMIT
+ valueFrom:
+ resourceFieldRef:
+ containerName: clickhouse-operator
+ resource: limits.memory
+ - name: WATCH_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ image: docker.io/altinity/clickhouse-operator:0.25.4
+ imagePullPolicy: Always
+ name: clickhouse-operator
+ - image: docker.io/altinity/metrics-exporter:0.25.4
+ imagePullPolicy: Always
+ name: metrics-exporter
+ serviceAccountName: clickhouse-operator
+ permissions:
+ - serviceAccountName: clickhouse-operator
+ rules:
+ #
+ # Core API group
+ #
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - services
+ - persistentvolumeclaims
+ - secrets
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - delete
+ - apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+ - list
+ #
+ # apps.* resources
+ #
+ - apiGroups:
+ - apps
+ resources:
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - apps
+ resources:
+ - replicasets
+ verbs:
+ - get
+ - patch
+ - update
+ - delete
+ # The operator deployment itself, identified by name
+ - apiGroups:
+ - apps
+ resources:
+ - deployments
+ resourceNames:
+ - clickhouse-operator
+ verbs:
+ - get
+ - patch
+ - update
+ - delete
+ #
+ # policy.* resources
+ #
+ - apiGroups:
+ - policy
+ resources:
+ - poddisruptionbudgets
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - create
+ - delete
+ #
+ # discovery.* resources
+ #
+ - apiGroups:
+ - discovery.k8s.io
+ resources:
+ - endpointslices
+ verbs:
+ - get
+ - list
+ - watch
+ #
+ # apiextensions
+ #
+ - apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - list
+ # clickhouse-related resources
+ - apiGroups:
+ - clickhouse.altinity.com
+ #
+ # The operator's specific Custom Resources
+ #
+
+ resources:
+ - clickhouseinstallations
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - update
+ - delete
+ - apiGroups:
+ - clickhouse.altinity.com
+ resources:
+ - clickhouseinstallationtemplates
+ - clickhouseoperatorconfigurations
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - clickhouse.altinity.com
+ resources:
+ - clickhouseinstallations/finalizers
+ - clickhouseinstallationtemplates/finalizers
+ - clickhouseoperatorconfigurations/finalizers
+ verbs:
+ - update
+ - apiGroups:
+ - clickhouse.altinity.com
+ resources:
+ - clickhouseinstallations/status
+ - clickhouseinstallationtemplates/status
+ - clickhouseoperatorconfigurations/status
+ verbs:
+ - get
+ - update
+ - patch
+ - create
+ - delete
+ # clickhouse-keeper-related resources
+ - apiGroups:
+ - clickhouse-keeper.altinity.com
+ resources:
+ - clickhousekeeperinstallations
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - update
+ - delete
+ - apiGroups:
+ - clickhouse-keeper.altinity.com
+ resources:
+ - clickhousekeeperinstallations/finalizers
+ verbs:
+ - update
+ - apiGroups:
+ - clickhouse-keeper.altinity.com
+ resources:
+ - clickhousekeeperinstallations/status
+ verbs:
+ - get
+ - update
+ - patch
+ - create
+ - delete
diff --git a/deploy/operatorhub/0.25.4/clickhouseinstallations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.25.4/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
new file mode 100644
index 000000000..a5b5ff5a7
--- /dev/null
+++ b/deploy/operatorhub/0.25.4/clickhouseinstallations.clickhouse.altinity.com.crd.yaml
@@ -0,0 +1,1426 @@
+# Template Parameters:
+#
+# KIND=ClickHouseInstallation
+# SINGULAR=clickhouseinstallation
+# PLURAL=clickhouseinstallations
+# SHORT=chi
+# OPERATOR_VERSION=0.25.4
+#
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: clickhouseinstallations.clickhouse.altinity.com
+ labels:
+ clickhouse.altinity.com/chop: 0.25.4
+spec:
+ group: clickhouse.altinity.com
+ scope: Namespaced
+ names:
+ kind: ClickHouseInstallation
+ singular: clickhouseinstallation
+ plural: clickhouseinstallations
+ shortNames:
+ - chi
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ additionalPrinterColumns:
+ - name: version
+ type: string
+ description: Operator version
+ priority: 1 # show in wide view
+ jsonPath: .status.chop-version
+ - name: clusters
+ type: integer
+ description: Clusters count
+ jsonPath: .status.clusters
+ - name: shards
+ type: integer
+ description: Shards count
+ priority: 1 # show in wide view
+ jsonPath: .status.shards
+ - name: hosts
+ type: integer
+ description: Hosts count
+ jsonPath: .status.hosts
+ - name: taskID
+ type: string
+ description: TaskID
+ priority: 1 # show in wide view
+ jsonPath: .status.taskID
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
+ - name: hosts-completed
+ type: integer
+ description: Completed hosts count
+ jsonPath: .status.hostsCompleted
+ - name: hosts-updated
+ type: integer
+ description: Updated hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsUpdated
+ - name: hosts-added
+ type: integer
+ description: Added hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsAdded
+ - name: hosts-deleted
+ type: integer
+ description: Hosts deleted count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsDeleted
+ - name: endpoint
+ type: string
+ description: Client access endpoint
+ priority: 1 # show in wide view
+ jsonPath: .status.endpoint
+ - name: age
+ type: date
+ description: Age of the resource
+ # Displayed in all priorities
+ jsonPath: .metadata.creationTimestamp
+ - name: suspend
+ type: string
+ description: Suspend reconciliation
+ # Displayed in all priorities
+ jsonPath: .spec.suspend
+ subresources:
+ status: {}
+ schema:
+ openAPIV3Schema:
+ description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one or more clusters"
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: |
+ APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |
+ Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ status:
+ type: object
+ description: |
+ Status contains many fields like a normalized configuration, clickhouse-operator version, current action and all applied action list, current taskID and all applied taskIDs and other
+ properties:
+ chop-version:
+ type: string
+ description: "Operator version"
+ chop-commit:
+ type: string
+ description: "Operator git commit SHA"
+ chop-date:
+ type: string
+ description: "Operator build date"
+ chop-ip:
+ type: string
+ description: "IP address of the operator's pod which managed this resource"
+ clusters:
+ type: integer
+ minimum: 0
+ description: "Clusters count"
+ shards:
+ type: integer
+ minimum: 0
+ description: "Shards count"
+ replicas:
+ type: integer
+ minimum: 0
+ description: "Replicas count"
+ hosts:
+ type: integer
+ minimum: 0
+ description: "Hosts count"
+ status:
+ type: string
+ description: "Status"
+ taskID:
+ type: string
+ description: "Current task id"
+ taskIDsStarted:
+ type: array
+ description: "Started task ids"
+ nullable: true
+ items:
+ type: string
+ taskIDsCompleted:
+ type: array
+ description: "Completed task ids"
+ nullable: true
+ items:
+ type: string
+ action:
+ type: string
+ description: "Action"
+ actions:
+ type: array
+ description: "Actions"
+ nullable: true
+ items:
+ type: string
+ error:
+ type: string
+ description: "Last error"
+ errors:
+ type: array
+ description: "Errors"
+ nullable: true
+ items:
+ type: string
+ hostsUnchanged:
+ type: integer
+ minimum: 0
+ description: "Unchanged Hosts count"
+ hostsUpdated:
+ type: integer
+ minimum: 0
+ description: "Updated Hosts count"
+ hostsAdded:
+ type: integer
+ minimum: 0
+ description: "Added Hosts count"
+ hostsCompleted:
+ type: integer
+ minimum: 0
+ description: "Completed Hosts count"
+ hostsDeleted:
+ type: integer
+ minimum: 0
+ description: "Deleted Hosts count"
+ hostsDelete:
+ type: integer
+ minimum: 0
+ description: "About to delete Hosts count"
+ pods:
+ type: array
+ description: "Pods"
+ nullable: true
+ items:
+ type: string
+ pod-ips:
+ type: array
+ description: "Pod IPs"
+ nullable: true
+ items:
+ type: string
+ fqdns:
+ type: array
+ description: "Pods FQDNs"
+ nullable: true
+ items:
+ type: string
+ endpoint:
+ type: string
+ description: "Endpoint"
+ endpoints:
+ type: array
+ description: "All endpoints"
+ nullable: true
+ items:
+ type: string
+ generation:
+ type: integer
+ minimum: 0
+ description: "Generation"
+ normalized:
+ type: object
+ description: "Normalized resource requested"
+ x-kubernetes-preserve-unknown-fields: true
+ normalizedCompleted:
+ type: object
+ description: "Normalized resource completed"
+ x-kubernetes-preserve-unknown-fields: true
+ hostsWithTablesCreated:
+ type: array
+ description: "List of hosts with tables created by the operator"
+ nullable: true
+ items:
+ type: string
+ hostsWithReplicaCaughtUp:
+ type: array
+ description: "List of hosts with replica caught up"
+ nullable: true
+ items:
+ type: string
+ usedTemplates:
+ type: array
+ description: "List of templates used to build this CHI"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ items:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ # x-kubernetes-preserve-unknown-fields: true
+ description: |
+ Specification of the desired behavior of one or more ClickHouse clusters
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md
+ properties:
+ taskID:
+ type: string
+ description: |
+ Allows to define custom taskID for CHI update and watch status of this update execution.
+ Displayed in all .status.taskID* fields.
+ By default (if not filled) every update of CHI manifest will generate random taskID
+ stop: &TypeStringBool
+ type: string
+ description: |
+ Allows to stop all ClickHouse clusters defined in a CHI.
+ Works as the following:
+ - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. This leads to having all `Pods` and `Service` deleted. All PVCs are kept intact.
+ - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will be created again and all retained PVCs will be attached to `Pod`s.
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ restart:
+ type: string
+ description: |
+ In case 'RollingUpdate' specified, the operator will always restart ClickHouse pods during reconcile.
+ This option is used in rare cases when force restart is required and is typically removed after the use in order to avoid unneeded restarts.
+ enum:
+ - ""
+ - "RollingUpdate"
+ suspend:
+ !!merge <<: *TypeStringBool
+ description: |
+ Suspend reconciliation of resources managed by a ClickHouse Installation.
+ Works as the following:
+ - When `suspend` is `true` operator stops reconciling all resources.
+ - When `suspend` is `false` or not set, operator reconciles all resources.
+ troubleshoot:
+ !!merge <<: *TypeStringBool
+ description: |
+ Allows to troubleshoot Pods during CrashLoopBack state.
+ This may happen when wrong configuration applied, in this case `clickhouse-server` wouldn't start.
+ Command within ClickHouse container is modified with `sleep` in order to avoid quick restarts
+ and give time to troubleshoot via CLI.
+ Liveness and Readiness probes are disabled as well.
+ namespaceDomainPattern:
+ type: string
+ description: |
+ Custom domain pattern which will be used for DNS names of `Service` or `Pod`.
+ Typical use scenario - custom cluster domain in Kubernetes cluster
+ Example: %s.svc.my.test
+ templating:
+ type: object
+ # nullable: true
+ description: |
+ Optional, applicable inside ClickHouseInstallationTemplate only.
+ Defines current ClickHouseInstallationTemplate application options to target ClickHouseInstallation(s).
+ properties:
+ policy:
+ type: string
+ description: |
+ When defined as `auto` inside ClickhouseInstallationTemplate, this ClickhouseInstallationTemplate
+ will be auto-added into ClickHouseInstallation, selectable by `chiSelector`.
+ Default value is `manual`, meaning ClickHouseInstallation should request this ClickhouseInstallationTemplate explicitly.
+ enum:
+ - ""
+ - "auto"
+ - "manual"
+ chiSelector:
+ type: object
+ description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ reconciling: &TypeReconcile
+ type: object
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ # nullable: true
+ properties:
+ policy:
+ type: string
+ description: |
+ DISCUSSED TO BE DEPRECATED
+ Syntax sugar
+ Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config
+ Possible values:
+ - wait - should wait to exclude host, complete queries and include host back into the cluster
+ - nowait - should NOT wait to exclude host, complete queries and include host back into the cluster
+ enum:
+ - ""
+ - "wait"
+ - "nowait"
+ configMapPropagationTimeout:
+ type: integer
+ description: |
+ Timeout in seconds for `clickhouse-operator` to wait for modified `ConfigMap` to propagate into the `Pod`
+ More details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically
+ minimum: 0
+ maximum: 3600
+ cleanup:
+ type: object
+ description: "Optional, defines behavior for cleanup Kubernetes resources during reconcile cycle"
+ # nullable: true
+ properties:
+ unknownObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with found Kubernetes resources which should be managed by clickhouse-operator,
+ but do not have `ownerReference` to any currently managed `ClickHouseInstallation` resource.
+ Default behavior is `Delete`
+ # nullable: true
+ properties:
+ statefulSet: &TypeObjectsCleanup
+ type: string
+ description: "Behavior policy for unknown StatefulSet, `Delete` by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ pvc:
+ type: string
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown PVC, `Delete` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown ConfigMap, `Delete` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown Service, `Delete` by default"
+ reconcileFailedObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with Kubernetes resources which are failed during reconcile.
+ Default behavior is `Retain`
+ # nullable: true
+ properties:
+ statefulSet:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed StatefulSet, `Retain` by default"
+ pvc:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed PVC, `Retain` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed ConfigMap, `Retain` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed Service, `Retain` by default"
+ macros:
+ type: object
+ description: "macros parameters"
+ properties:
+ sections:
+ type: object
+ description: "sections behaviour for macros"
+ properties:
+ users:
+ type: object
+ description: "sections behaviour for macros on users"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ profiles:
+ type: object
+ description: "sections behaviour for macros on profiles"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ quotas:
+ type: object
+ description: "sections behaviour for macros on quotas"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ settings:
+ type: object
+ description: "sections behaviour for macros on settings"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ files:
+ type: object
+ description: "sections behaviour for macros on files"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ runtime: &TypeReconcileRuntime
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host: &TypeReconcileHost
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectfully before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ !!merge <<: *TypeStringBool
+ queries:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait during host launch procedure"
+ properties:
+ startup:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to not wait.
+ readiness:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for ready probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ reconcile:
+ !!merge <<: *TypeReconcile
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ defaults:
+ type: object
+ description: |
+ define default behavior for whole ClickHouseInstallation, some behavior can be re-define on cluster, shard and replica level
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults
+ # nullable: true
+ properties:
+ replicasUseFQDN:
+ !!merge <<: *TypeStringBool
+ description: |
+ define should replicas be specified by FQDN in ``.
+ In case of "no" will use short hostname and clickhouse-server will use kubernetes default suffixes for DNS lookup
+ "no" by default
+ distributedDDL:
+ type: object
+ description: |
+ allows change `` settings
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl
+ # nullable: true
+ properties:
+ profile:
+ type: string
+ description: "Settings from this profile will be used to execute DDL queries"
+ storageManagement:
+ type: object
+ description: default storage management options
+ properties:
+ provisioner: &TypePVCProvisioner
+ type: string
+ description: "defines `PVC` provisioner - be it StatefulSet or the Operator"
+ enum:
+ - ""
+ - "StatefulSet"
+ - "Operator"
+ reclaimPolicy: &TypePVCReclaimPolicy
+ type: string
+ description: |
+ defines behavior of `PVC` deletion.
+ `Delete` by default, if `Retain` specified then `PVC` will be kept when deleting StatefulSet
+ enum:
+ - ""
+ - "Retain"
+ - "Delete"
+ templates: &TypeTemplateNames
+ type: object
+ description: "optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource"
+ # nullable: true
+ properties:
+ hostTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`"
+ podTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ dataVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ logVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ serviceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates. used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ serviceTemplates:
+ type: array
+ description: "optional, template names from chi.spec.templates.serviceTemplates. used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ nullable: true
+ items:
+ type: string
+ clusterServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ shardServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`"
+ replicaServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ volumeClaimTemplate:
+ type: string
+ description: "optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ configuration:
+ type: object
+ description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource"
+ # nullable: true
+ properties:
+ zookeeper: &TypeZookeeperConfig
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/`
+ `clickhouse-operator` itself doesn't manage Zookeeper, please install Zookeeper separately; look at examples on https://github.com/Altinity/clickhouse-operator/tree/master/deploy/zookeeper/
+ currently, zookeeper (or clickhouse-keeper replacement) used for *ReplicatedMergeTree table engines and for `distributed_ddl`
+ More details: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings_zookeeper
+ # nullable: true
+ properties:
+ nodes:
+ type: array
+ description: "describe every available zookeeper cluster node for interaction"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - host
+ properties:
+ host:
+ type: string
+ description: "dns name or ip address for Zookeeper node"
+ port:
+ type: integer
+ description: "TCP port which used to connect to Zookeeper node"
+ minimum: 0
+ maximum: 65535
+ secure:
+ !!merge <<: *TypeStringBool
+ description: "if a secure connection to Zookeeper is required"
+ availabilityZone:
+ type: string
+ description: "availability zone for Zookeeper node"
+ session_timeout_ms:
+ type: integer
+ description: "session timeout during connect to Zookeeper"
+ operation_timeout_ms:
+ type: integer
+ description: "one operation timeout during Zookeeper transactions"
+ root:
+ type: string
+ description: "optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)"
+ identity:
+ type: string
+ description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ !!merge <<: *TypeStringBool
+ description: "Enables compression in Keeper protocol if set to true"
+ users:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure password hashed, authorization restrictions, database level security row filters etc.
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-users/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationusers
+
+ any key could contains `valueFrom` with `secretKeyRef` which allow pass password from kubernetes secrets
+ secret value will pass in `pod.spec.containers.env`, and generate with from_env=XXX in XML in /etc/clickhouse-server/users.d/chop-generated-users.xml
+ it does not allow automatic updates when the `secret` is updated; change spec.taskID to manually trigger a reconcile cycle
+
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+
+ any key with prefix `k8s_secret_` shall has value with format namespace/secret/key or secret/key
+ in this case value from secret will write directly into XML tag during render *-usersd ConfigMap
+
+ any key with prefix `k8s_secret_env` shall has value with format namespace/secret/key or secret/key
+ in this case value from secret will write into environment variable and write to XML tag via from_env=XXX
+
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ profiles:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of settings profile
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-profiles/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationprofiles
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of resource quotas
+ More details: https://clickhouse.tech/docs/en/operations/quotas/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationquotas
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ settings: &TypeSettings
+ type: object
+ description: |
+ allows configure `clickhouse-server` settings inside ... tag in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationsettings
+
+ any key could contains `valueFrom` with `secretKeyRef` which allow pass password from kubernetes secrets
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+
+ secret value will pass in `pod.spec.env`, and generate with from_env=XXX in XML in /etc/clickhouse-server/config.d/chop-generated-settings.xml
+ it does not allow automatic updates when the `secret` is updated; change spec.taskID to manually trigger a reconcile cycle
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ files: &TypeFiles
+ type: object
+ description: |
+ allows define content of any setting file inside each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ every key in this object is the file name
+ every value in this object is the file content
+ you can use `!!binary |` and base64 for binary files, see details here https://yaml.org/type/binary.html
+ each key could contains prefix like {common}, {users}, {hosts} or config.d, users.d, conf.d, wrong prefixes will be ignored, subfolders also will be ignored
+ More details: https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-05-files-nested.yaml
+
+ any key could contains `valueFrom` with `secretKeyRef` which allow pass values from kubernetes secrets
+ secrets will mounted into pod as separate volume in /etc/clickhouse-server/secrets.d/
+ and will automatically update when update secret
+ it useful for pass SSL certificates from cert-manager or similar tool
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ clusters:
+ type: array
+ description: |
+ describes clusters layout and allows change settings on cluster-level, shard-level and replica-level
+ every cluster is a set of StatefulSet, one StatefulSet contains only one Pod with `clickhouse-server`
+ all Pods will rendered in part of ClickHouse configs, mounted from ConfigMap as `/etc/clickhouse-server/config.d/chop-generated-remote_servers.xml`
+ Clusters will use for Distributed table engine, more details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ If `cluster` contains zookeeper settings (could be inherited from top `chi` level), when you can create *ReplicatedMergeTree tables
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "cluster name, used to identify set of servers and wide used during generate names of related Kubernetes resources"
+ minLength: 1
+ # See namePartClusterMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ zookeeper:
+ !!merge <<: *TypeZookeeperConfig
+ description: |
+ optional, allows configure .. section in each `Pod` only in current ClickHouse cluster, during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.zookeeper` settings
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
+ override top-level `chi.spec.configuration.templates`
+ schemaPolicy:
+ type: object
+ description: |
+ describes how schema is propagated within replicas and shards
+ properties:
+ replica:
+ type: string
+ description: "how schema is propagated within a replica"
+ enum:
+ # List SchemaPolicyReplicaXXX constants from model
+ - ""
+ - "None"
+ - "All"
+ shard:
+ type: string
+ description: "how schema is propagated between shards"
+ enum:
+ # List SchemaPolicyShardXXX constants from model
+ - ""
+ - "None"
+ - "All"
+ - "DistributedTablesOnly"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: optional, open secure ports for cluster
+ secret:
+ type: object
+ description: "optional, shared secret value to secure cluster communications"
+ properties:
+ auto:
+ !!merge <<: *TypeStringBool
+ description: "Auto-generate shared secret value to secure cluster communications"
+ value:
+ description: "Cluster shared secret value in plain text"
+ type: string
+ valueFrom:
+ description: "Cluster shared secret source"
+ type: object
+ properties:
+ secretKeyRef:
+ description: |
+ Selects a key of a secret in the clickhouse installation namespace.
+ Should not be used if value is not empty.
+ type: object
+ properties:
+ name:
+ description: |
+ Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ key:
+ description: The key of the secret to select from. Must be a valid secret key.
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must be defined
+ type: boolean
+ required:
+ - name
+ - key
+ pdbManaged:
+ !!merge <<: *TypeStringBool
+ description: |
+ Specifies whether the Pod Disruption Budget (PDB) should be managed.
+ During the next installation, if PDB management is enabled, the operator will
+ attempt to retrieve any existing PDB. If none is found, it will create a new one
+ and initiate a reconciliation loop. If PDB management is disabled, the existing PDB
+ will remain intact, and the reconciliation loop will not be executed. By default,
+ PDB management is enabled.
+ pdbMaxUnavailable:
+ type: integer
+ description: |
+ Pod eviction is allowed if at most "pdbMaxUnavailable" pods are unavailable after the eviction,
+ i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions
+ by specifying 0. This is a mutually exclusive setting with "minAvailable".
+ minimum: 0
+ maximum: 65535
+ reconcile:
+ type: object
+ description: "allow tuning reconciling process"
+ properties:
+ runtime:
+ !!merge <<: *TypeReconcileRuntime
+ host:
+ !!merge <<: *TypeReconcileHost
+ layout:
+ type: object
+ description: |
+ describes current cluster layout: how many shards in the cluster, how many replicas in each shard
+ allows overriding settings on each shard and replica separately
+ # nullable: true
+ properties:
+ shardsCount:
+ type: integer
+ description: |
+ how many shards for the current ClickHouse cluster will run in Kubernetes,
+ each shard contains a shared-nothing part of data and a set of replicas,
+ cluster contains 1 shard by default
+ replicasCount:
+ type: integer
+ description: |
+ how many replicas in each shard for the current cluster will run in Kubernetes,
+ each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance,
+ every shard contains 1 replica by default
+ shards:
+ type: array
+ description: |
+ optional, allows override top-level `chi.spec.configuration`, cluster-level
+ `chi.spec.configuration.clusters` settings for each shard separately,
+ use it only if you fully understand what you do
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ definitionType:
+ type: string
+ description: "DEPRECATED - to be removed soon"
+ weight:
+ type: integer
+ description: |
+ optional, 1 by default, allows setup shard setting which will use during insert into tables with `Distributed` engine,
+ will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+ More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ internalReplication:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, `true` by default when `chi.spec.configuration.clusters[].layout.ReplicaCount` > 1 and 0 otherwise
+ allows setup setting which will use during insert into tables with `Distributed` engine for insert only in one live replica and other replicas will download inserted data during replication,
+ will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+ More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings` and cluster-level `chi.spec.configuration.clusters.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected shard
+ override top-level `chi.spec.configuration.templates` and cluster-level `chi.spec.configuration.clusters.templates`
+ replicasCount:
+ type: integer
+ description: |
+ optional, how much replicas in selected shard for selected ClickHouse cluster will run in Kubernetes, each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance,
+ shard contains 1 replica by default
+ override cluster-level `chi.spec.configuration.clusters.layout.replicasCount`
+ minimum: 1
+ replicas:
+ type: array
+ description: |
+ optional, allows override behavior for selected replicas from cluster-level `chi.spec.configuration.clusters` and shard-level `chi.spec.configuration.clusters.layout.shards`
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `tcp` for selected replica, override `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `http` for selected replica, override `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `interserver` for selected replica, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and shard-level `chi.spec.configuration.clusters.layout.shards.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`, cluster-level `chi.spec.configuration.clusters.files` and shard-level `chi.spec.configuration.clusters.layout.shards.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` and shard-level `chi.spec.configuration.clusters.layout.shards.templates`
+ replicas:
+ type: array
+ description: "optional, allows override top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard relates to selected replica, use it only if you fully understand what you do"
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and will ignore if shard-level `chi.spec.configuration.clusters.layout.shards` present
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`
+ shardsCount:
+ type: integer
+ description: "optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`"
+ minimum: 1
+ shards:
+ type: array
+ description: "optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents"
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `tcp` for selected shard, override `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `http` for selected shard, override `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `interserver` for selected shard, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and replica-level `chi.spec.configuration.clusters.layout.replicas.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates`
+ templates:
+ type: object
+ description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it"
+ # nullable: true
+ properties:
+ hostTemplates:
+ type: array
+ description: "hostTemplate will be used during apply to generate `clickhouse-server` config files"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ description: "template name, can be used to link inside top-level `chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.templates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`"
+ type: string
+ portDistribution:
+ type: array
+ description: "define how will distribute numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "type of distribution, when `Unspecified` (default value) then all listen ports on clickhouse-server configuration in all Pods will have the same value, when `ClusterScopeIndex` then ports will increment to offset from base value depends on shard and replica index inside cluster with combination of `chi.spec.templates.podTemplates.spec.HostNetwork` it allows setup ClickHouse cluster inside Kubernetes and provide access via external network bypass Kubernetes internal network"
+ enum:
+ # List PortDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClusterScopeIndex"
+ spec:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `tcp_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=tcp]`
+ More info: https://clickhouse.tech/docs/en/interfaces/tcp/
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `http_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=http]`
+ More info: https://clickhouse.tech/docs/en/interfaces/http/
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `interserver_http_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=interserver]`
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#interserver-http-port
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: "be careful, this part of CRD allows override template inside template, don't use it if you don't understand what you do"
+ podTemplates:
+ type: array
+ description: |
+ podTemplate will use during render `Pod` inside `StatefulSet.spec` and allows define rendered `Pod.spec`, pod scheduling distribution and pod zone
+ More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "template name, can be used to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.templates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`"
+ generateName:
+ type: string
+ description: "allows define format for generated `Pod` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about available template variables"
+ zone:
+ type: object
+ description: "allows define custom zone name and will separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ #required:
+ # - values
+ properties:
+ key:
+ type: string
+ description: "optional, if defined, allows select kubernetes nodes by label with `name` equal `key`"
+ values:
+ type: array
+ description: "optional, if defined, allows select kubernetes nodes by label with `value` in `values`"
+ # nullable: true
+ items:
+ type: string
+ distribution:
+ type: string
+ description: "DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ enum:
+ - ""
+ - "Unspecified"
+ - "OnePerHost"
+ podDistribution:
+ type: array
+ description: "define ClickHouse Pod distribution policy between Kubernetes Nodes inside Shard, Replica, Namespace, CHI, another ClickHouse cluster"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "you can define multiple affinity policy types"
+ enum:
+ # List PodDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClickHouseAntiAffinity"
+ - "ShardAntiAffinity"
+ - "ReplicaAntiAffinity"
+ - "AnotherNamespaceAntiAffinity"
+ - "AnotherClickHouseInstallationAntiAffinity"
+ - "AnotherClusterAntiAffinity"
+ - "MaxNumberPerNode"
+ - "NamespaceAffinity"
+ - "ClickHouseInstallationAffinity"
+ - "ClusterAffinity"
+ - "ShardAffinity"
+ - "ReplicaAffinity"
+ - "PreviousTailAffinity"
+ - "CircularReplication"
+ scope:
+ type: string
+ description: "scope for apply each podDistribution"
+ enum:
+ # list PodDistributionScopeXXX constants
+ - ""
+ - "Unspecified"
+ - "Shard"
+ - "Replica"
+ - "Cluster"
+ - "ClickHouseInstallation"
+ - "Namespace"
+ number:
+ type: integer
+ description: "define, how much ClickHouse Pods could be inside selected scope with selected distribution type"
+ minimum: 0
+ maximum: 65535
+ topologyKey:
+ type: string
+ description: |
+ use for inter-pod affinity look to `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`,
+ more info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ metadata:
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Pod
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify PodSpec
+ type: object
+ description: "allows define whole Pod.spec inside StatefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ volumeClaimTemplates:
+ type: array
+ description: |
+ allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, could use to link inside
+ top-level `chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`,
+ cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`,
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.templates.logVolumeClaimTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate`
+ provisioner: *TypePVCProvisioner
+ reclaimPolicy: *TypePVCReclaimPolicy
+ metadata:
+ type: object
+ description: |
+ allows to pass standard object's metadata from template to PVC
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ description: |
+ allows define all aspects of `PVC` resource
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ serviceTemplates:
+ type: array
+ description: |
+ allows define template for rendering `Service` which would get endpoint from Pods which scoped chi-wide, cluster-wide, shard-wide, replica-wide level
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, could use to link inside
+ chi-level `chi.spec.defaults.templates.serviceTemplate`
+ cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate`
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.shardServiceTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or `chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate`
+ generateName:
+ type: string
+ description: |
+ allows define format for generated `Service` name,
+ look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates
+ for details about available template variables
+ metadata:
+ # TODO specify ObjectMeta
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Service
+ Can be used to define Cloud-Provider-specific metadata which impacts the behavior of the service
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify ServiceSpec
+ type: object
+ description: |
+ describe behavior of generated Service
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ useTemplates:
+ type: array
+ description: |
+ list of `ClickHouseInstallationTemplate` (chit) resource names which will merge with current `CHI`
+ manifest during render Kubernetes resources to create related ClickHouse clusters
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "name of `ClickHouseInstallationTemplate` (chit) resource"
+ namespace:
+ type: string
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
+ useType:
+ type: string
+ description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
+ enum:
+ # List useTypeXXX constants from model
+ - ""
+ - "merge"
diff --git a/deploy/operatorhub/0.25.4/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.25.4/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
new file mode 100644
index 000000000..6af0653e5
--- /dev/null
+++ b/deploy/operatorhub/0.25.4/clickhouseinstallationtemplates.clickhouse.altinity.com.crd.yaml
@@ -0,0 +1,1426 @@
+# Template Parameters:
+#
+# KIND=ClickHouseInstallationTemplate
+# SINGULAR=clickhouseinstallationtemplate
+# PLURAL=clickhouseinstallationtemplates
+# SHORT=chit
+# OPERATOR_VERSION=0.25.4
+#
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: clickhouseinstallationtemplates.clickhouse.altinity.com
+ labels:
+ clickhouse.altinity.com/chop: 0.25.4
+spec:
+ group: clickhouse.altinity.com
+ scope: Namespaced
+ names:
+ kind: ClickHouseInstallationTemplate
+ singular: clickhouseinstallationtemplate
+ plural: clickhouseinstallationtemplates
+ shortNames:
+ - chit
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ additionalPrinterColumns:
+ - name: version
+ type: string
+ description: Operator version
+ priority: 1 # show in wide view
+ jsonPath: .status.chop-version
+ - name: clusters
+ type: integer
+ description: Clusters count
+ jsonPath: .status.clusters
+ - name: shards
+ type: integer
+ description: Shards count
+ priority: 1 # show in wide view
+ jsonPath: .status.shards
+ - name: hosts
+ type: integer
+ description: Hosts count
+ jsonPath: .status.hosts
+ - name: taskID
+ type: string
+ description: TaskID
+ priority: 1 # show in wide view
+ jsonPath: .status.taskID
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
+ - name: hosts-completed
+ type: integer
+ description: Completed hosts count
+ jsonPath: .status.hostsCompleted
+ - name: hosts-updated
+ type: integer
+ description: Updated hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsUpdated
+ - name: hosts-added
+ type: integer
+ description: Added hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsAdded
+ - name: hosts-deleted
+ type: integer
+ description: Hosts deleted count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsDeleted
+ - name: endpoint
+ type: string
+ description: Client access endpoint
+ priority: 1 # show in wide view
+ jsonPath: .status.endpoint
+ - name: age
+ type: date
+ description: Age of the resource
+ # Displayed in all priorities
+ jsonPath: .metadata.creationTimestamp
+ - name: suspend
+ type: string
+ description: Suspend reconciliation
+ # Displayed in all priorities
+ jsonPath: .spec.suspend
+ subresources:
+ status: {}
+ schema:
+ openAPIV3Schema:
+ description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one or more clusters"
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: |
+ APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |
+ Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ status:
+ type: object
+ description: |
+ Status contains many fields like a normalized configuration, clickhouse-operator version, current action and all applied action list, current taskID and all applied taskIDs and other
+ properties:
+ chop-version:
+ type: string
+ description: "Operator version"
+ chop-commit:
+ type: string
+ description: "Operator git commit SHA"
+ chop-date:
+ type: string
+ description: "Operator build date"
+ chop-ip:
+ type: string
+ description: "IP address of the operator's pod which managed this resource"
+ clusters:
+ type: integer
+ minimum: 0
+ description: "Clusters count"
+ shards:
+ type: integer
+ minimum: 0
+ description: "Shards count"
+ replicas:
+ type: integer
+ minimum: 0
+ description: "Replicas count"
+ hosts:
+ type: integer
+ minimum: 0
+ description: "Hosts count"
+ status:
+ type: string
+ description: "Status"
+ taskID:
+ type: string
+ description: "Current task id"
+ taskIDsStarted:
+ type: array
+ description: "Started task ids"
+ nullable: true
+ items:
+ type: string
+ taskIDsCompleted:
+ type: array
+ description: "Completed task ids"
+ nullable: true
+ items:
+ type: string
+ action:
+ type: string
+ description: "Action"
+ actions:
+ type: array
+ description: "Actions"
+ nullable: true
+ items:
+ type: string
+ error:
+ type: string
+ description: "Last error"
+ errors:
+ type: array
+ description: "Errors"
+ nullable: true
+ items:
+ type: string
+ hostsUnchanged:
+ type: integer
+ minimum: 0
+ description: "Unchanged Hosts count"
+ hostsUpdated:
+ type: integer
+ minimum: 0
+ description: "Updated Hosts count"
+ hostsAdded:
+ type: integer
+ minimum: 0
+ description: "Added Hosts count"
+ hostsCompleted:
+ type: integer
+ minimum: 0
+ description: "Completed Hosts count"
+ hostsDeleted:
+ type: integer
+ minimum: 0
+ description: "Deleted Hosts count"
+ hostsDelete:
+ type: integer
+ minimum: 0
+ description: "About to delete Hosts count"
+ pods:
+ type: array
+ description: "Pods"
+ nullable: true
+ items:
+ type: string
+ pod-ips:
+ type: array
+ description: "Pod IPs"
+ nullable: true
+ items:
+ type: string
+ fqdns:
+ type: array
+ description: "Pods FQDNs"
+ nullable: true
+ items:
+ type: string
+ endpoint:
+ type: string
+ description: "Endpoint"
+ endpoints:
+ type: array
+ description: "All endpoints"
+ nullable: true
+ items:
+ type: string
+ generation:
+ type: integer
+ minimum: 0
+ description: "Generation"
+ normalized:
+ type: object
+ description: "Normalized resource requested"
+ x-kubernetes-preserve-unknown-fields: true
+ normalizedCompleted:
+ type: object
+ description: "Normalized resource completed"
+ x-kubernetes-preserve-unknown-fields: true
+ hostsWithTablesCreated:
+ type: array
+ description: "List of hosts with tables created by the operator"
+ nullable: true
+ items:
+ type: string
+ hostsWithReplicaCaughtUp:
+ type: array
+ description: "List of hosts with replica caught up"
+ nullable: true
+ items:
+ type: string
+ usedTemplates:
+ type: array
+ description: "List of templates used to build this CHI"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ items:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ # x-kubernetes-preserve-unknown-fields: true
+ description: |
+ Specification of the desired behavior of one or more ClickHouse clusters
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md
+ properties:
+ taskID:
+ type: string
+ description: |
+ Allows to define custom taskID for CHI update and watch status of this update execution.
+ Displayed in all .status.taskID* fields.
+ By default (if not filled) every update of CHI manifest will generate random taskID
+ stop: &TypeStringBool
+ type: string
+ description: |
+ Allows to stop all ClickHouse clusters defined in a CHI.
+ Works as the following:
+ - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. This leads to having all `Pods` and `Services` deleted. All PVCs are kept intact.
+ - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will be created again and all retained PVCs will be attached to `Pod`s.
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ restart:
+ type: string
+ description: |
+ In case 'RollingUpdate' is specified, the operator will always restart ClickHouse pods during reconcile.
+ This option is used in rare cases when a forced restart is required and is typically removed after use in order to avoid unneeded restarts.
+ enum:
+ - ""
+ - "RollingUpdate"
+ suspend:
+ !!merge <<: *TypeStringBool
+ description: |
+ Suspend reconciliation of resources managed by a ClickHouse Installation.
+ Works as the following:
+ - When `suspend` is `true` operator stops reconciling all resources.
+ - When `suspend` is `false` or not set, operator reconciles all resources.
+ troubleshoot:
+ !!merge <<: *TypeStringBool
+ description: |
+ Allows to troubleshoot Pods during CrashLoopBack state.
+ This may happen when wrong configuration applied, in this case `clickhouse-server` wouldn't start.
+ Command within ClickHouse container is modified with `sleep` in order to avoid quick restarts
+ and give time to troubleshoot via CLI.
+ Liveness and Readiness probes are disabled as well.
+ namespaceDomainPattern:
+ type: string
+ description: |
+ Custom domain pattern which will be used for DNS names of `Service` or `Pod`.
+ Typical use scenario - custom cluster domain in Kubernetes cluster
+ Example: %s.svc.my.test
+ templating:
+ type: object
+ # nullable: true
+ description: |
+ Optional, applicable inside ClickHouseInstallationTemplate only.
+ Defines current ClickHouseInstallationTemplate application options to target ClickHouseInstallation(s).
+ properties:
+ policy:
+ type: string
+ description: |
+ When defined as `auto` inside ClickhouseInstallationTemplate, this ClickhouseInstallationTemplate
+ will be auto-added into ClickHouseInstallation, selectable by `chiSelector`.
+ Default value is `manual`, meaning ClickHouseInstallation should request this ClickhouseInstallationTemplate explicitly.
+ enum:
+ - ""
+ - "auto"
+ - "manual"
+ chiSelector:
+ type: object
+ description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ reconciling: &TypeReconcile
+ type: object
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ # nullable: true
+ properties:
+ policy:
+ type: string
+ description: |
+ DISCUSSED TO BE DEPRECATED
+ Syntax sugar
+ Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config
+ Possible values:
+ - wait - should wait to exclude host, complete queries and include host back into the cluster
+ - nowait - should NOT wait to exclude host, complete queries and include host back into the cluster
+ enum:
+ - ""
+ - "wait"
+ - "nowait"
+ configMapPropagationTimeout:
+ type: integer
+ description: |
+ Timeout in seconds for `clickhouse-operator` to wait for modified `ConfigMap` to propagate into the `Pod`
+ More details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically
+ minimum: 0
+ maximum: 3600
+ cleanup:
+ type: object
+ description: "Optional, defines behavior for cleanup Kubernetes resources during reconcile cycle"
+ # nullable: true
+ properties:
+ unknownObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with found Kubernetes resources which should be managed by clickhouse-operator,
+ but do not have `ownerReference` to any currently managed `ClickHouseInstallation` resource.
+ Default behavior is `Delete`
+ # nullable: true
+ properties:
+ statefulSet: &TypeObjectsCleanup
+ type: string
+ description: "Behavior policy for unknown StatefulSet, `Delete` by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ pvc:
+ type: string
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown PVC, `Delete` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown ConfigMap, `Delete` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown Service, `Delete` by default"
+ reconcileFailedObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with Kubernetes resources which are failed during reconcile.
+ Default behavior is `Retain`
+ # nullable: true
+ properties:
+ statefulSet:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed StatefulSet, `Retain` by default"
+ pvc:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed PVC, `Retain` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed ConfigMap, `Retain` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed Service, `Retain` by default"
+ macros:
+ type: object
+ description: "macros parameters"
+ properties:
+ sections:
+ type: object
+ description: "sections behaviour for macros"
+ properties:
+ users:
+ type: object
+ description: "sections behaviour for macros on users"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ profiles:
+ type: object
+ description: "sections behaviour for macros on profiles"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ quotas:
+ type: object
+ description: "sections behaviour for macros on quotas"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ settings:
+ type: object
+ description: "sections behaviour for macros on settings"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ files:
+ type: object
+ description: "sections behaviour for macros on files"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ runtime: &TypeReconcileRuntime
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host: &TypeReconcileHost
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectfully before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ !!merge <<: *TypeStringBool
+ queries:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait during host launch procedure"
+ properties:
+ startup:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to do not wait.
+ readiness:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for ready probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ reconcile:
+ !!merge <<: *TypeReconcile
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ defaults:
+ type: object
+ description: |
+ defines default behavior for the whole ClickHouseInstallation; some behavior can be re-defined on cluster, shard and replica level
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults
+ # nullable: true
+ properties:
+ replicasUseFQDN:
+ !!merge <<: *TypeStringBool
+ description: |
+ defines whether replicas should be specified by FQDN in ``.
+ In case of "no", a short hostname will be used and clickhouse-server will use kubernetes default suffixes for DNS lookup
+ "no" by default
+ distributedDDL:
+ type: object
+ description: |
+ allows change `` settings
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl
+ # nullable: true
+ properties:
+ profile:
+ type: string
+ description: "Settings from this profile will be used to execute DDL queries"
+ storageManagement:
+ type: object
+ description: default storage management options
+ properties:
+ provisioner: &TypePVCProvisioner
+ type: string
+ description: "defines `PVC` provisioner - be it StatefulSet or the Operator"
+ enum:
+ - ""
+ - "StatefulSet"
+ - "Operator"
+ reclaimPolicy: &TypePVCReclaimPolicy
+ type: string
+ description: |
+ defines behavior of `PVC` deletion.
+ `Delete` by default, if `Retain` specified then `PVC` will be kept when deleting StatefulSet
+ enum:
+ - ""
+ - "Retain"
+ - "Delete"
+ templates: &TypeTemplateNames
+ type: object
+ description: "optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource"
+ # nullable: true
+ properties:
+ hostTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`"
+ podTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ dataVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ logVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ serviceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates. used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ serviceTemplates:
+ type: array
+ description: "optional, template names from chi.spec.templates.serviceTemplates. used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ nullable: true
+ items:
+ type: string
+ clusterServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ shardServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`"
+ replicaServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ volumeClaimTemplate:
+ type: string
+ description: "optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ configuration:
+ type: object
+ description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource"
+ # nullable: true
+ properties:
+ zookeeper: &TypeZookeeperConfig
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/`
+ `clickhouse-operator` itself doesn't manage Zookeeper, please install Zookeeper separately; see examples at https://github.com/Altinity/clickhouse-operator/tree/master/deploy/zookeeper/
+ currently, zookeeper (or clickhouse-keeper replacement) used for *ReplicatedMergeTree table engines and for `distributed_ddl`
+ More details: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings_zookeeper
+ # nullable: true
+ properties:
+ nodes:
+ type: array
+ description: "describe every available zookeeper cluster node for interaction"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - host
+ properties:
+ host:
+ type: string
+ description: "dns name or ip address for Zookeeper node"
+ port:
+ type: integer
+ description: "TCP port which used to connect to Zookeeper node"
+ minimum: 0
+ maximum: 65535
+ secure:
+ !!merge <<: *TypeStringBool
+ description: "if a secure connection to Zookeeper is required"
+ availabilityZone:
+ type: string
+ description: "availability zone for Zookeeper node"
+ session_timeout_ms:
+ type: integer
+ description: "session timeout during connect to Zookeeper"
+ operation_timeout_ms:
+ type: integer
+ description: "one operation timeout during Zookeeper transactions"
+ root:
+ type: string
+ description: "optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)"
+ identity:
+ type: string
+ description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ !!merge <<: *TypeStringBool
+ description: "Enables compression in Keeper protocol if set to true"
+ users:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure password hashed, authorization restrictions, database level security row filters etc.
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-users/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationusers
+
+ any key could contain `valueFrom` with `secretKeyRef` which allows passing a password from kubernetes secrets
+ secret value will be passed in `pod.spec.containers.env`, and generated with from_env=XXX in XML in /etc/clickhouse-server/users.d/chop-generated-users.xml
+ it does not allow automatic updates when the `secret` is updated; change spec.taskID to manually trigger a reconcile cycle
+
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+
+ any key with prefix `k8s_secret_` shall have a value with the format namespace/secret/key or secret/key
+ in this case the value from the secret will be written directly into the XML tag during rendering of the *-usersd ConfigMap
+
+ any key with prefix `k8s_secret_env` shall have a value with the format namespace/secret/key or secret/key
+ in this case the value from the secret will be written into an environment variable and referenced in the XML tag via from_env=XXX
+
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ profiles:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of settings profile
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-profiles/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationprofiles
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of resource quotas
+ More details: https://clickhouse.tech/docs/en/operations/quotas/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationquotas
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ settings: &TypeSettings
+ type: object
+ description: |
+ allows configure `clickhouse-server` settings inside ... tag in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationsettings
+
+ any key could contain `valueFrom` with `secretKeyRef` which allows passing a password from kubernetes secrets
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+
+ secret value will be passed in `pod.spec.env`, and generated with from_env=XXX in XML in /etc/clickhouse-server/config.d/chop-generated-settings.xml
+ it does not allow automatic updates when the `secret` is updated; change spec.taskID to manually trigger a reconcile cycle
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ files: &TypeFiles
+ type: object
+ description: |
+ allows define content of any setting file inside each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ every key in this object is the file name
+ every value in this object is the file content
+ you can use `!!binary |` and base64 for binary files, see details here https://yaml.org/type/binary.html
+ each key may contain a prefix like {common}, {users}, {hosts} or config.d, users.d, conf.d; wrong prefixes will be ignored, subfolders will also be ignored
+ More details: https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-05-files-nested.yaml
+
+ any key could contain `valueFrom` with `secretKeyRef` which allows passing values from kubernetes secrets
+ secrets will be mounted into the pod as a separate volume in /etc/clickhouse-server/secrets.d/
+ and will be updated automatically when the secret is updated
+ it is useful for passing SSL certificates from cert-manager or similar tools
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ clusters:
+ type: array
+ description: |
+ describes clusters layout and allows change settings on cluster-level, shard-level and replica-level
+ every cluster is a set of StatefulSet, one StatefulSet contains only one Pod with `clickhouse-server`
+ all Pods will rendered in part of ClickHouse configs, mounted from ConfigMap as `/etc/clickhouse-server/config.d/chop-generated-remote_servers.xml`
+ Clusters will use for Distributed table engine, more details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ If `cluster` contains zookeeper settings (could be inherited from top `chi` level), then you can create *ReplicatedMergeTree tables
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "cluster name, used to identify set of servers and wide used during generate names of related Kubernetes resources"
+ minLength: 1
+ # See namePartClusterMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ zookeeper:
+ !!merge <<: *TypeZookeeperConfig
+ description: |
+ optional, allows configure .. section in each `Pod` only in current ClickHouse cluster, during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.zookeeper` settings
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
+ override top-level `chi.spec.configuration.templates`
+ schemaPolicy:
+ type: object
+ description: |
+ describes how schema is propagated within replicas and shards
+ properties:
+ replica:
+ type: string
+ description: "how schema is propagated within a replica"
+ enum:
+ # List SchemaPolicyReplicaXXX constants from model
+ - ""
+ - "None"
+ - "All"
+ shard:
+ type: string
+ description: "how schema is propagated between shards"
+ enum:
+ # List SchemaPolicyShardXXX constants from model
+ - ""
+ - "None"
+ - "All"
+ - "DistributedTablesOnly"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: optional, open secure ports for cluster
+ secret:
+ type: object
+ description: "optional, shared secret value to secure cluster communications"
+ properties:
+ auto:
+ !!merge <<: *TypeStringBool
+ description: "Auto-generate shared secret value to secure cluster communications"
+ value:
+ description: "Cluster shared secret value in plain text"
+ type: string
+ valueFrom:
+ description: "Cluster shared secret source"
+ type: object
+ properties:
+ secretKeyRef:
+ description: |
+ Selects a key of a secret in the clickhouse installation namespace.
+ Should not be used if value is not empty.
+ type: object
+ properties:
+ name:
+ description: |
+ Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ key:
+ description: The key of the secret to select from. Must be a valid secret key.
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must be defined
+ type: boolean
+ required:
+ - name
+ - key
+ pdbManaged:
+ !!merge <<: *TypeStringBool
+ description: |
+ Specifies whether the Pod Disruption Budget (PDB) should be managed.
+ During the next installation, if PDB management is enabled, the operator will
+ attempt to retrieve any existing PDB. If none is found, it will create a new one
+ and initiate a reconciliation loop. If PDB management is disabled, the existing PDB
+ will remain intact, and the reconciliation loop will not be executed. By default,
+ PDB management is enabled.
+ pdbMaxUnavailable:
+ type: integer
+ description: |
+ Pod eviction is allowed if at most "pdbMaxUnavailable" pods are unavailable after the eviction,
+ i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions
+ by specifying 0. This is a mutually exclusive setting with "minAvailable".
+ minimum: 0
+ maximum: 65535
+ reconcile:
+ type: object
+ description: "allow tuning reconciling process"
+ properties:
+ runtime:
+ !!merge <<: *TypeReconcileRuntime
+ host:
+ !!merge <<: *TypeReconcileHost
+ layout:
+ type: object
+ description: |
+ describes current cluster layout: how many shards in the cluster, how many replicas in each shard
+ allows overriding settings on each shard and replica separately
+ # nullable: true
+ properties:
+ shardsCount:
+ type: integer
+ description: |
+ how many shards of the current ClickHouse cluster will run in Kubernetes,
+ each shard contains shared-nothing part of data and contains set of replicas,
+ cluster contains 1 shard by default
+ replicasCount:
+ type: integer
+ description: |
+ how many replicas in each shard of the current cluster will run in Kubernetes,
+ each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance,
+ every shard contains 1 replica by default
+ shards:
+ type: array
+ description: |
+ optional, allows overriding top-level `chi.spec.configuration` and cluster-level
+ `chi.spec.configuration.clusters` settings for each shard separately,
+ use it only if you fully understand what you do
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ definitionType:
+ type: string
+ description: "DEPRECATED - to be removed soon"
+ weight:
+ type: integer
+ description: |
+ optional, 1 by default, allows setup shard setting which will use during insert into tables with `Distributed` engine,
+ will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+ More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ internalReplication:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, `true` by default when `chi.spec.configuration.clusters[].layout.replicasCount` > 1 and `false` otherwise
+ allows setup setting which will use during insert into tables with `Distributed` engine for insert only in one live replica and other replicas will download inserted data during replication,
+ will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+ More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings` and cluster-level `chi.spec.configuration.clusters.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected shard
+ override top-level `chi.spec.configuration.templates` and cluster-level `chi.spec.configuration.clusters.templates`
+ replicasCount:
+ type: integer
+ description: |
+ optional, how much replicas in selected shard for selected ClickHouse cluster will run in Kubernetes, each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance,
+ shard contains 1 replica by default
+ override cluster-level `chi.spec.configuration.clusters.layout.replicasCount`
+ minimum: 1
+ replicas:
+ type: array
+ description: |
+ optional, allows override behavior for selected replicas from cluster-level `chi.spec.configuration.clusters` and shard-level `chi.spec.configuration.clusters.layout.shards`
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `tcp` for selected replica, override `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `http` for selected replica, override `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `interserver` for selected replica, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and shard-level `chi.spec.configuration.clusters.layout.shards.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`, cluster-level `chi.spec.configuration.clusters.files` and shard-level `chi.spec.configuration.clusters.layout.shards.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` and shard-level `chi.spec.configuration.clusters.layout.shards.templates`
+ replicas:
+ type: array
+ description: "optional, allows override top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard relates to selected replica, use it only if you fully understand what you do"
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and will ignore if shard-level `chi.spec.configuration.clusters.layout.shards` present
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`
+ shardsCount:
+ type: integer
+ description: "optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`"
+ minimum: 1
+ shards:
+ type: array
+ description: "optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents"
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `tcp` for selected shard, override `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `http` for selected shard, override `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `interserver` for selected shard, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and replica-level `chi.spec.configuration.clusters.layout.replicas.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates`
+ templates:
+ type: object
+ description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it"
+ # nullable: true
+ properties:
+ hostTemplates:
+ type: array
+ description: "hostTemplate will use during apply to generate `clickhouse-server` config files"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ description: "template name, could use to link inside top-level `chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.templates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`"
+ type: string
+ portDistribution:
+ type: array
+ description: "define how will distribute numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "type of distribution, when `Unspecified` (default value) then all listen ports on clickhouse-server configuration in all Pods will have the same value, when `ClusterScopeIndex` then ports will increment to offset from base value depends on shard and replica index inside cluster with combination of `chi.spec.templates.podTemlates.spec.HostNetwork` it allows setup ClickHouse cluster inside Kubernetes and provide access via external network bypass Kubernetes internal network"
+ enum:
+ # List PortDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClusterScopeIndex"
+ spec:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `tcp_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=tcp]`
+ More info: https://clickhouse.tech/docs/en/interfaces/tcp/
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `http_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=http]`
+ More info: https://clickhouse.tech/docs/en/interfaces/http/
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `interserver_http_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=interserver]`
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#interserver-http-port
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: "be careful, this part of CRD allows override template inside template, don't use it if you don't understand what you do"
+ podTemplates:
+ type: array
+ description: |
+ podTemplate will use during render `Pod` inside `StatefulSet.spec` and allows define rendered `Pod.spec`, pod scheduling distribution and pod zone
+ More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "template name, could use to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.templates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`"
+ generateName:
+ type: string
+ description: "allows define format for generated `Pod` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about available template variables"
+ zone:
+ type: object
+ description: "allows define custom zone name and will separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ #required:
+ # - values
+ properties:
+ key:
+ type: string
+ description: "optional, if defined, allows select kubernetes nodes by label with `name` equal `key`"
+ values:
+ type: array
+ description: "optional, if defined, allows select kubernetes nodes by label with `value` in `values`"
+ # nullable: true
+ items:
+ type: string
+ distribution:
+ type: string
+ description: "DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ enum:
+ - ""
+ - "Unspecified"
+ - "OnePerHost"
+ podDistribution:
+ type: array
+ description: "define ClickHouse Pod distribution policy between Kubernetes Nodes inside Shard, Replica, Namespace, CHI, another ClickHouse cluster"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "you can define multiple affinity policy types"
+ enum:
+ # List PodDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClickHouseAntiAffinity"
+ - "ShardAntiAffinity"
+ - "ReplicaAntiAffinity"
+ - "AnotherNamespaceAntiAffinity"
+ - "AnotherClickHouseInstallationAntiAffinity"
+ - "AnotherClusterAntiAffinity"
+ - "MaxNumberPerNode"
+ - "NamespaceAffinity"
+ - "ClickHouseInstallationAffinity"
+ - "ClusterAffinity"
+ - "ShardAffinity"
+ - "ReplicaAffinity"
+ - "PreviousTailAffinity"
+ - "CircularReplication"
+ scope:
+ type: string
+ description: "scope for apply each podDistribution"
+ enum:
+ # list PodDistributionScopeXXX constants
+ - ""
+ - "Unspecified"
+ - "Shard"
+ - "Replica"
+ - "Cluster"
+ - "ClickHouseInstallation"
+ - "Namespace"
+ number:
+ type: integer
+ description: "define, how much ClickHouse Pods could be inside selected scope with selected distribution type"
+ minimum: 0
+ maximum: 65535
+ topologyKey:
+ type: string
+ description: |
+ use for inter-pod affinity look to `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`,
+ more info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ metadata:
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Pod
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify PodSpec
+ type: object
+ description: "allows define whole Pod.spec inside StatefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ volumeClaimTemplates:
+ type: array
+ description: |
+ allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, could use to link inside
+ top-level `chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`,
+ cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`,
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.templates.logVolumeClaimTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate`
+ provisioner: *TypePVCProvisioner
+ reclaimPolicy: *TypePVCReclaimPolicy
+ metadata:
+ type: object
+ description: |
+ allows to pass standard object's metadata from template to PVC
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ description: |
+ allows define all aspects of `PVC` resource
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ serviceTemplates:
+ type: array
+ description: |
+ allows define template for rendering `Service` which would get endpoint from Pods which scoped chi-wide, cluster-wide, shard-wide, replica-wide level
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, could use to link inside
+ chi-level `chi.spec.defaults.templates.serviceTemplate`
+ cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate`
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.shardServiceTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or `chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate`
+ generateName:
+ type: string
+ description: |
+ allows define format for generated `Service` name,
+ look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates
+ for details about available template variables
+ metadata:
+ # TODO specify ObjectMeta
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Service
+ Could be used to define Cloud Provider specific metadata which impacts behavior of the service
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify ServiceSpec
+ type: object
+ description: |
+ describe behavior of generated Service
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ useTemplates:
+ type: array
+ description: |
+ list of `ClickHouseInstallationTemplate` (chit) resource names which will merge with current `CHI`
+ manifest during render Kubernetes resources to create related ClickHouse clusters
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "name of `ClickHouseInstallationTemplate` (chit) resource"
+ namespace:
+ type: string
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
+ useType:
+ type: string
+ description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
+ enum:
+ # List useTypeXXX constants from model
+ - ""
+ - "merge"
diff --git a/deploy/operatorhub/0.25.4/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml b/deploy/operatorhub/0.25.4/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
new file mode 100644
index 000000000..661d48e97
--- /dev/null
+++ b/deploy/operatorhub/0.25.4/clickhousekeeperinstallations.clickhouse-keeper.altinity.com.crd.yaml
@@ -0,0 +1,883 @@
+# Template Parameters:
+#
+# OPERATOR_VERSION=0.25.4
+#
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
+ labels:
+ clickhouse-keeper.altinity.com/chop: 0.25.4
+spec:
+ group: clickhouse-keeper.altinity.com
+ scope: Namespaced
+ names:
+ kind: ClickHouseKeeperInstallation
+ singular: clickhousekeeperinstallation
+ plural: clickhousekeeperinstallations
+ shortNames:
+ - chk
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ additionalPrinterColumns:
+ - name: version
+ type: string
+ description: Operator version
+ priority: 1 # show in wide view
+ jsonPath: .status.chop-version
+ - name: clusters
+ type: integer
+ description: Clusters count
+ jsonPath: .status.clusters
+ - name: shards
+ type: integer
+ description: Shards count
+ priority: 1 # show in wide view
+ jsonPath: .status.shards
+ - name: hosts
+ type: integer
+ description: Hosts count
+ jsonPath: .status.hosts
+ - name: taskID
+ type: string
+ description: TaskID
+ priority: 1 # show in wide view
+ jsonPath: .status.taskID
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
+ - name: hosts-unchanged
+ type: integer
+ description: Unchanged hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsUnchanged
+ - name: hosts-updated
+ type: integer
+ description: Updated hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsUpdated
+ - name: hosts-added
+ type: integer
+ description: Added hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsAdded
+ - name: hosts-completed
+ type: integer
+ description: Completed hosts count
+ jsonPath: .status.hostsCompleted
+ - name: hosts-deleted
+ type: integer
+ description: Hosts deleted count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsDeleted
+ - name: hosts-delete
+ type: integer
+ description: Hosts to be deleted count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsDelete
+ - name: endpoint
+ type: string
+ description: Client access endpoint
+ priority: 1 # show in wide view
+ jsonPath: .status.endpoint
+ - name: age
+ type: date
+ description: Age of the resource
+ # Displayed in all priorities
+ jsonPath: .metadata.creationTimestamp
+ - name: suspend
+ type: string
+ description: Suspend reconciliation
+ # Displayed in all priorities
+ jsonPath: .spec.suspend
+ subresources:
+ status: {}
+ schema:
+ openAPIV3Schema:
+ description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one or more clusters"
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: |
+ APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |
+ Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ status:
+ type: object
+ description: |
+ Status contains many fields like a normalized configuration, clickhouse-operator version, current action and all applied action list, current taskID and all applied taskIDs and other
+ properties:
+ chop-version:
+ type: string
+ description: "Operator version"
+ chop-commit:
+ type: string
+ description: "Operator git commit SHA"
+ chop-date:
+ type: string
+ description: "Operator build date"
+ chop-ip:
+ type: string
+ description: "IP address of the operator's pod which managed this resource"
+ clusters:
+ type: integer
+ minimum: 0
+ description: "Clusters count"
+ shards:
+ type: integer
+ minimum: 0
+ description: "Shards count"
+ replicas:
+ type: integer
+ minimum: 0
+ description: "Replicas count"
+ hosts:
+ type: integer
+ minimum: 0
+ description: "Hosts count"
+ status:
+ type: string
+ description: "Status"
+ taskID:
+ type: string
+ description: "Current task id"
+ taskIDsStarted:
+ type: array
+ description: "Started task ids"
+ nullable: true
+ items:
+ type: string
+ taskIDsCompleted:
+ type: array
+ description: "Completed task ids"
+ nullable: true
+ items:
+ type: string
+ action:
+ type: string
+ description: "Action"
+ actions:
+ type: array
+ description: "Actions"
+ nullable: true
+ items:
+ type: string
+ error:
+ type: string
+ description: "Last error"
+ errors:
+ type: array
+ description: "Errors"
+ nullable: true
+ items:
+ type: string
+ hostsUnchanged:
+ type: integer
+ minimum: 0
+ description: "Unchanged Hosts count"
+ hostsUpdated:
+ type: integer
+ minimum: 0
+ description: "Updated Hosts count"
+ hostsAdded:
+ type: integer
+ minimum: 0
+ description: "Added Hosts count"
+ hostsCompleted:
+ type: integer
+ minimum: 0
+ description: "Completed Hosts count"
+ hostsDeleted:
+ type: integer
+ minimum: 0
+ description: "Deleted Hosts count"
+ hostsDelete:
+ type: integer
+ minimum: 0
+ description: "About to delete Hosts count"
+ pods:
+ type: array
+ description: "Pods"
+ nullable: true
+ items:
+ type: string
+ pod-ips:
+ type: array
+ description: "Pod IPs"
+ nullable: true
+ items:
+ type: string
+ fqdns:
+ type: array
+ description: "Pods FQDNs"
+ nullable: true
+ items:
+ type: string
+ endpoint:
+ type: string
+ description: "Endpoint"
+ endpoints:
+ type: array
+ description: "All endpoints"
+ nullable: true
+ items:
+ type: string
+ generation:
+ type: integer
+ minimum: 0
+ description: "Generation"
+ normalized:
+ type: object
+ description: "Normalized resource requested"
+ x-kubernetes-preserve-unknown-fields: true
+ normalizedCompleted:
+ type: object
+ description: "Normalized resource completed"
+ x-kubernetes-preserve-unknown-fields: true
+ hostsWithTablesCreated:
+ type: array
+ description: "List of hosts with tables created by the operator"
+ nullable: true
+ items:
+ type: string
+ hostsWithReplicaCaughtUp:
+ type: array
+ description: "List of hosts with replica caught up"
+ nullable: true
+ items:
+ type: string
+ usedTemplates:
+ type: array
+ description: "List of templates used to build this CHI"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ items:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ # x-kubernetes-preserve-unknown-fields: true
+ description: |
+ Specification of the desired behavior of one or more ClickHouse clusters
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md
+ properties:
+ taskID:
+ type: string
+ description: |
+ Allows to define custom taskID for CHI update and watch status of this update execution.
+ Displayed in all .status.taskID* fields.
+ By default (if not filled) every update of CHI manifest will generate random taskID
+ stop: &TypeStringBool
+ type: string
+ description: |
+ Allows to stop all ClickHouse clusters defined in a CHI.
+ Works as the following:
+ - When `stop` is `1` operator sets `Replicas: 0` in each StatefulSet. This leads to having all `Pods` and `Service` deleted. All PVCs are kept intact.
+ - When `stop` is `0` operator sets `Replicas: 1` and `Pod`s and `Service`s will created again and all retained PVCs will be attached to `Pod`s.
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ suspend:
+ !!merge <<: *TypeStringBool
+ description: |
+ Suspend reconciliation of resources managed by a ClickHouse Keeper.
+ Works as the following:
+ - When `suspend` is `true` operator stops reconciling all resources.
+ - When `suspend` is `false` or not set, operator reconciles all resources.
+ namespaceDomainPattern:
+ type: string
+ description: |
+ Custom domain pattern which will be used for DNS names of `Service` or `Pod`.
+ Typical use scenario - custom cluster domain in Kubernetes cluster
+ Example: %s.svc.my.test
+ reconciling:
+ type: object
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ # nullable: true
+ properties:
+ policy:
+ type: string
+ description: |
+ DISCUSSED TO BE DEPRECATED
+ Syntax sugar
+ Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config
+ Possible values:
+ - wait - should wait to exclude host, complete queries and include host back into the cluster
+ - nowait - should NOT wait to exclude host, complete queries and include host back into the cluster
+ enum:
+ - ""
+ - "wait"
+ - "nowait"
+ configMapPropagationTimeout:
+ type: integer
+ description: |
+ Timeout in seconds for `clickhouse-operator` to wait for modified `ConfigMap` to propagate into the `Pod`
+ More details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically
+ minimum: 0
+ maximum: 3600
+ cleanup:
+ type: object
+ description: "Optional, defines behavior for cleanup Kubernetes resources during reconcile cycle"
+ # nullable: true
+ properties:
+ unknownObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with found Kubernetes resources which should be managed by clickhouse-operator,
+ but do not have `ownerReference` to any currently managed `ClickHouseInstallation` resource.
+ Default behavior is `Delete`
+ # nullable: true
+ properties:
+ statefulSet: &TypeObjectsCleanup
+ type: string
+ description: "Behavior policy for unknown StatefulSet, `Delete` by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ pvc:
+ type: string
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown PVC, `Delete` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown ConfigMap, `Delete` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown Service, `Delete` by default"
+ reconcileFailedObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with Kubernetes resources which are failed during reconcile.
+ Default behavior is `Retain`
+ # nullable: true
+ properties:
+ statefulSet:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed StatefulSet, `Retain` by default"
+ pvc:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed PVC, `Retain` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed ConfigMap, `Retain` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed Service, `Retain` by default"
+ defaults:
+ type: object
+ description: |
+ define default behavior for whole ClickHouseInstallation, some behavior can be re-define on cluster, shard and replica level
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults
+ # nullable: true
+ properties:
+ replicasUseFQDN:
+ !!merge <<: *TypeStringBool
+ description: |
+ define should replicas be specified by FQDN in ``.
+ In case of "no" will use short hostname and clickhouse-server will use kubernetes default suffixes for DNS lookup
+ "no" by default
+ distributedDDL:
+ type: object
+ description: |
+ allows change `<distributed_ddl>` settings
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl
+ # nullable: true
+ properties:
+ profile:
+ type: string
+ description: "Settings from this profile will be used to execute DDL queries"
+ storageManagement:
+ type: object
+ description: default storage management options
+ properties:
+ provisioner: &TypePVCProvisioner
+ type: string
+ description: "defines `PVC` provisioner - be it StatefulSet or the Operator"
+ enum:
+ - ""
+ - "StatefulSet"
+ - "Operator"
+ reclaimPolicy: &TypePVCReclaimPolicy
+ type: string
+ description: |
+ defines behavior of `PVC` deletion.
+ `Delete` by default, if `Retain` specified then `PVC` will be kept when deleting StatefulSet
+ enum:
+ - ""
+ - "Retain"
+ - "Delete"
+ templates: &TypeTemplateNames
+ type: object
+ description: "optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource"
+ # nullable: true
+ properties:
+ hostTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`"
+ podTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ dataVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ logVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ serviceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates. used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ serviceTemplates:
+ type: array
+ description: "optional, template names from chi.spec.templates.serviceTemplates. used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ nullable: true
+ items:
+ type: string
+ clusterServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ shardServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`"
+ replicaServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ volumeClaimTemplate:
+ type: string
+ description: "optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ configuration:
+ type: object
+ description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource"
+ # nullable: true
+ properties:
+ settings: &TypeSettings
+ type: object
+ description: |
+ allows configure multiple aspects and behavior for `clickhouse-keeper` instance
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ files: &TypeFiles
+ type: object
+ description: |
+ allows define content of any setting
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ clusters:
+ type: array
+ description: |
+ describes clusters layout and allows change settings on cluster-level and replica-level
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "cluster name, used to identify set of servers and wide used during generate names of related Kubernetes resources"
+ minLength: 1
+ # See namePartClusterMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`
+ pdbManaged:
+ !!merge <<: *TypeStringBool
+ description: |
+ Specifies whether the Pod Disruption Budget (PDB) should be managed.
+ During the next installation, if PDB management is enabled, the operator will
+ attempt to retrieve any existing PDB. If none is found, it will create a new one
+ and initiate a reconciliation loop. If PDB management is disabled, the existing PDB
+ will remain intact, and the reconciliation loop will not be executed. By default,
+ PDB management is enabled.
+ pdbMaxUnavailable:
+ type: integer
+ description: |
+ Pod eviction is allowed if at most "pdbMaxUnavailable" pods are unavailable after the eviction,
+ i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions
+ by specifying 0. This is a mutually exclusive setting with "minAvailable".
+ minimum: 0
+ maximum: 65535
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
+ override top-level `chi.spec.configuration.templates`
+ layout:
+ type: object
+ description: |
+ describe current cluster layout, how much shards in cluster, how much replica in shard
+ allows override settings on each shard and replica separately
+ # nullable: true
+ properties:
+ replicasCount:
+ type: integer
+ description: |
+ how much replicas in each shards for current cluster will run in Kubernetes,
+ each replica is a separate `StatefulSet` which contains only one `Pod` with `clickhouse-server` instance,
+ every shard contains 1 replica by default
+ replicas:
+ type: array
+ description: "optional, allows override top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard relates to selected replica, use it only if you fully understand what you do"
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and will ignore if shard-level `chi.spec.configuration.clusters.layout.shards` present
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`
+ shardsCount:
+ type: integer
+ description: "optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`"
+ minimum: 1
+ shards:
+ type: array
+ description: "optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents"
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ zkPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ raftPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and replica-level `chi.spec.configuration.clusters.layout.replicas.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates`
+ templates:
+ type: object
+ description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it"
+ # nullable: true
+ properties:
+ hostTemplates:
+ type: array
+ description: "hostTemplate will be used to generate `clickhouse-server` config files"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ description: "template name, could use to link inside top-level `chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.templates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`"
+ type: string
+ portDistribution:
+ type: array
+ description: "define how will distribute numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "type of distribution, when `Unspecified` (default value) then all listen ports on clickhouse-server configuration in all Pods will have the same value, when `ClusterScopeIndex` then ports will increment to offset from base value depends on shard and replica index inside cluster with combination of `chi.spec.templates.podTemplates.spec.HostNetwork` it allows setup ClickHouse cluster inside Kubernetes and provide access via external network bypass Kubernetes internal network"
+ enum:
+ # List PortDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClusterScopeIndex"
+ spec:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ zkPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ raftPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: "be careful, this part of CRD allows override template inside template, don't use it if you don't understand what you do"
+ podTemplates:
+ type: array
+ description: |
+ podTemplate will use during render `Pod` inside `StatefulSet.spec` and allows define rendered `Pod.spec`, pod scheduling distribution and pod zone
+ More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "template name, could use to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.templates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`"
+ generateName:
+ type: string
+ description: "allows define format for generated `Pod` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about available template variables"
+ zone:
+ type: object
+ description: "allows define custom zone name and will separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ #required:
+ # - values
+ properties:
+ key:
+ type: string
+ description: "optional, if defined, allows select kubernetes nodes by label with `name` equal `key`"
+ values:
+ type: array
+ description: "optional, if defined, allows select kubernetes nodes by label with `value` in `values`"
+ # nullable: true
+ items:
+ type: string
+ distribution:
+ type: string
+ description: "DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ enum:
+ - ""
+ - "Unspecified"
+ - "OnePerHost"
+ podDistribution:
+ type: array
+ description: "define ClickHouse Pod distribution policy between Kubernetes Nodes inside Shard, Replica, Namespace, CHI, another ClickHouse cluster"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "you can define multiple affinity policy types"
+ enum:
+ # List PodDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClickHouseAntiAffinity"
+ - "ShardAntiAffinity"
+ - "ReplicaAntiAffinity"
+ - "AnotherNamespaceAntiAffinity"
+ - "AnotherClickHouseInstallationAntiAffinity"
+ - "AnotherClusterAntiAffinity"
+ - "MaxNumberPerNode"
+ - "NamespaceAffinity"
+ - "ClickHouseInstallationAffinity"
+ - "ClusterAffinity"
+ - "ShardAffinity"
+ - "ReplicaAffinity"
+ - "PreviousTailAffinity"
+ - "CircularReplication"
+ scope:
+ type: string
+ description: "scope for apply each podDistribution"
+ enum:
+ # list PodDistributionScopeXXX constants
+ - ""
+ - "Unspecified"
+ - "Shard"
+ - "Replica"
+ - "Cluster"
+ - "ClickHouseInstallation"
+ - "Namespace"
+ number:
+ type: integer
+ description: "define, how much ClickHouse Pods could be inside selected scope with selected distribution type"
+ minimum: 0
+ maximum: 65535
+ topologyKey:
+ type: string
+ description: |
+ use for inter-pod affinity look to `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`,
+ more info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ metadata:
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Pod
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify PodSpec
+ type: object
+ description: "allows define whole Pod.spec inside StatefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ volumeClaimTemplates:
+ type: array
+ description: |
+ allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, could use to link inside
+ top-level `chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`,
+ cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`,
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.templates.logVolumeClaimTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate`
+ provisioner: *TypePVCProvisioner
+ reclaimPolicy: *TypePVCReclaimPolicy
+ metadata:
+ type: object
+ description: |
+ allows to pass standard object's metadata from template to PVC
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ description: |
+ allows define all aspects of `PVC` resource
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ serviceTemplates:
+ type: array
+ description: |
+ allows define template for rendering `Service` which would get endpoint from Pods which scoped chi-wide, cluster-wide, shard-wide, replica-wide level
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, could use to link inside
+ chi-level `chi.spec.defaults.templates.serviceTemplate`
+ cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate`
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.shardServiceTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or `chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate`
+ generateName:
+ type: string
+ description: |
+ allows define format for generated `Service` name,
+ look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates
+ for details about available template variables"
+ metadata:
+ # TODO specify ObjectMeta
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Service
+ Could be used to define Cloud Provider specific metadata which impacts the behavior of the service
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify ServiceSpec
+ type: object
+ description: |
+ describe behavior of generated Service
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
diff --git a/deploy/operatorhub/0.25.4/clickhouseoperatorconfigurations.clickhouse.altinity.com.crd.yaml b/deploy/operatorhub/0.25.4/clickhouseoperatorconfigurations.clickhouse.altinity.com.crd.yaml
new file mode 100644
index 000000000..6367355de
--- /dev/null
+++ b/deploy/operatorhub/0.25.4/clickhouseoperatorconfigurations.clickhouse.altinity.com.crd.yaml
@@ -0,0 +1,519 @@
+# Template Parameters:
+#
+# NONE
+#
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: clickhouseoperatorconfigurations.clickhouse.altinity.com
+ labels:
+ clickhouse.altinity.com/chop: 0.25.4
+spec:
+ group: clickhouse.altinity.com
+ scope: Namespaced
+ names:
+ kind: ClickHouseOperatorConfiguration
+ singular: clickhouseoperatorconfiguration
+ plural: clickhouseoperatorconfigurations
+ shortNames:
+ - chopconf
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ additionalPrinterColumns:
+ - name: namespaces
+ type: string
+ description: Watch namespaces
+ jsonPath: .status
+ - name: age
+ type: date
+ description: Age of the resource
+ # Displayed in all priorities
+ jsonPath: .metadata.creationTimestamp
+ schema:
+ openAPIV3Schema:
+ type: object
+ description: "allows customize `clickhouse-operator` settings, need restart clickhouse-operator pod after adding, more details https://github.com/Altinity/clickhouse-operator/blob/master/docs/operator_configuration.md"
+ x-kubernetes-preserve-unknown-fields: true
+ properties:
+ status:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ description: |
+ Allows to define settings of the clickhouse-operator.
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/config/config.yaml
+ Check into etc-clickhouse-operator* ConfigMaps if you need more control
+ x-kubernetes-preserve-unknown-fields: true
+ properties:
+ watch:
+ type: object
+ description: "Parameters for watch kubernetes resources which used by clickhouse-operator deployment"
+ properties:
+ namespaces:
+ type: object
+ description: "List of namespaces where clickhouse-operator watches for events."
+ x-kubernetes-preserve-unknown-fields: true
+ clickhouse:
+ type: object
+ description: "Clickhouse related parameters used by clickhouse-operator"
+ properties:
+ configuration:
+ type: object
+ properties:
+ file:
+ type: object
+ properties:
+ path:
+ type: object
+ description: |
+ Each 'path' can be either absolute or relative.
+ In case path is absolute - it is used as is.
+ In case path is relative - it is relative to the folder where configuration file you are reading right now is located.
+ properties:
+ common:
+ type: string
+ description: |
+ Path to the folder where ClickHouse configuration files common for all instances within a CHI are located.
+ Default value - config.d
+ host:
+ type: string
+ description: |
+ Path to the folder where ClickHouse configuration files unique for each instance (host) within a CHI are located.
+ Default value - conf.d
+ user:
+ type: string
+ description: |
+ Path to the folder where ClickHouse configuration files with users settings are located.
+ Files are common for all instances within a CHI.
+ Default value - users.d
+ user:
+ type: object
+ description: "Default parameters for any user which will create"
+ properties:
+ default:
+ type: object
+ properties:
+ profile:
+ type: string
+ description: "ClickHouse server configuration `<profile>...</profile>` for any <user>"
+ quota:
+ type: string
+ description: "ClickHouse server configuration `<quota>...</quota>` for any <user>"
+ networksIP:
+ type: array
+ description: "ClickHouse server configuration `<networks><ip>...</ip></networks>` for any <user>"
+ items:
+ type: string
+ password:
+ type: string
+ description: "ClickHouse server configuration `<password>...</password>` for any <user>"
+ network:
+ type: object
+ description: "Default network parameters for any user which will create"
+ properties:
+ hostRegexpTemplate:
+ type: string
+ description: "ClickHouse server configuration `<host_regexp>...</host_regexp>` for any <user>"
+ configurationRestartPolicy:
+ type: object
+ description: "Configuration restart policy describes what configuration changes require ClickHouse restart"
+ properties:
+ rules:
+ type: array
+ description: "Array of set of rules per specified ClickHouse versions"
+ items:
+ type: object
+ properties:
+ version:
+ type: string
+ description: "ClickHouse version expression"
+ rules:
+ type: array
+ description: "Set of configuration rules for specified ClickHouse version"
+ items:
+ type: object
+ description: "setting: value pairs for configuration restart policy"
+ x-kubernetes-preserve-unknown-fields: true
+ access:
+ type: object
+ description: "parameters which use for connect to clickhouse from clickhouse-operator deployment"
+ properties:
+ scheme:
+ type: string
+ description: "The scheme to use for connecting to ClickHouse. Possible values: http, https, auto"
+ username:
+ type: string
+ description: "ClickHouse username to be used by operator to connect to ClickHouse instances, deprecated, use chCredentialsSecretName"
+ password:
+ type: string
+ description: "ClickHouse password to be used by operator to connect to ClickHouse instances, deprecated, use chCredentialsSecretName"
+ rootCA:
+ type: string
+ description: "Root certificate authority that clients use when verifying server certificates. Used for https connection to ClickHouse"
+ secret:
+ type: object
+ properties:
+ namespace:
+ type: string
+ description: "Location of k8s Secret with username and password to be used by operator to connect to ClickHouse instances"
+ name:
+ type: string
+ description: "Name of k8s Secret with username and password to be used by operator to connect to ClickHouse instances"
+ port:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "Port to be used by operator to connect to ClickHouse instances"
+ timeouts:
+ type: object
+ description: "Timeouts used to limit connection and queries from the operator to ClickHouse instances, In seconds"
+ properties:
+ connect:
+ type: integer
+ minimum: 1
+ maximum: 10
+ description: "Timeout to set up a connection from the operator to ClickHouse instances. In seconds."
+ query:
+ type: integer
+ minimum: 1
+ maximum: 600
+ description: "Timeout to perform a SQL query from the operator to ClickHouse instances. In seconds."
+ addons:
+ type: object
+ description: "Configuration addons specifies additional settings"
+ properties:
+ rules:
+ type: array
+ description: "Array of set of rules per specified ClickHouse versions"
+ items:
+ type: object
+ properties:
+ version:
+ type: string
+ description: "ClickHouse version expression"
+ spec:
+ type: object
+ description: "spec"
+ properties:
+ configuration:
+ type: object
+ description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource"
+ properties:
+ users:
+ type: object
+ description: "see same section from CR spec"
+ x-kubernetes-preserve-unknown-fields: true
+ profiles:
+ type: object
+ description: "see same section from CR spec"
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ type: object
+ description: "see same section from CR spec"
+ x-kubernetes-preserve-unknown-fields: true
+ settings:
+ type: object
+ description: "see same section from CR spec"
+ x-kubernetes-preserve-unknown-fields: true
+ files:
+ type: object
+ description: "see same section from CR spec"
+ x-kubernetes-preserve-unknown-fields: true
+ metrics:
+ type: object
+ description: "parameters which use for connect to fetch metrics from clickhouse by clickhouse-operator"
+ properties:
+ timeouts:
+ type: object
+ description: |
+ Timeouts used to limit connection and queries from the metrics exporter to ClickHouse instances
+ Specified in seconds.
+ properties:
+ collect:
+ type: integer
+ minimum: 1
+ maximum: 600
+ description: |
+ Timeout used to limit metrics collection request. In seconds.
+ Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
+ All collected metrics are returned.
+ template:
+ type: object
+ description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment"
+ properties:
+ chi:
+ type: object
+ properties:
+ policy:
+ type: string
+ description: |
+ CHI template updates handling policy
+ Possible policy values:
+ - ReadOnStart. Accept CHIT updates on the operator's start only.
+ - ApplyOnNextReconcile. Accept CHIT updates at all times. Apply new CHITs on next regular reconcile of the CHI
+ enum:
+ - ""
+ - "ReadOnStart"
+ - "ApplyOnNextReconcile"
+ path:
+ type: string
+ description: "Path to folder where ClickHouseInstallationTemplate .yaml manifests are located."
+ reconcile:
+ type: object
+ description: "allow tuning reconciling process"
+ properties:
+ runtime:
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileCHIsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "How many goroutines will be used to reconcile CHIs in parallel, 10 by default"
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet:
+ type: object
+ description: "Allow change default behavior for reconciling StatefulSet which generated by clickhouse-operator"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds
+ Possible options:
+ 1. abort - do nothing, just break the process and wait for admin.
+ 2. delete - delete newly created problematic StatefulSet.
+ 3. ignore (default) - ignore error, pretend nothing happened and move on to the next StatefulSet.
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for created/updated StatefulSet to be Ready"
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for created/updated StatefulSet status"
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds
+ Possible options:
+ 1. abort - do nothing, just break the process and wait for admin.
+ 2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration.
+ 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet.
+ host:
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectively, before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude: &TypeStringBool
+ type: string
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be excluded from a ClickHouse cluster"
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ queries:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait during host launch procedure"
+ properties:
+ startup:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to not wait.
+ readiness:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ annotation:
+ type: object
+ description: "defines which metadata.annotations items will include or exclude during render StatefulSet, Pod, PVC resources"
+ properties:
+ include:
+ type: array
+ description: |
+ When propagating annotations from the chi's `metadata.annotations` section to child objects' `metadata.annotations`,
+ include annotations with names from the following list
+ items:
+ type: string
+ exclude:
+ type: array
+ description: |
+ When propagating annotations from the chi's `metadata.annotations` section to child objects' `metadata.annotations`,
+ exclude annotations with names from the following list
+ items:
+ type: string
+ label:
+ type: object
+ description: "defines which metadata.labels will include or exclude during render StatefulSet, Pod, PVC resources"
+ properties:
+ include:
+ type: array
+ description: |
+ When propagating labels from the chi's `metadata.labels` section to child objects' `metadata.labels`,
+ include labels from the following list
+ items:
+ type: string
+ exclude:
+ type: array
+ items:
+ type: string
+ description: |
+ When propagating labels from the chi's `metadata.labels` section to child objects' `metadata.labels`,
+ exclude labels from the following list
+ appendScope:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether to append *Scope* labels to StatefulSet and Pod
+ - "LabelShardScopeIndex"
+ - "LabelReplicaScopeIndex"
+ - "LabelCHIScopeIndex"
+ - "LabelCHIScopeCycleSize"
+ - "LabelCHIScopeCycleIndex"
+ - "LabelCHIScopeCycleOffset"
+ - "LabelClusterScopeIndex"
+ - "LabelClusterScopeCycleSize"
+ - "LabelClusterScopeCycleIndex"
+ - "LabelClusterScopeCycleOffset"
+ metrics:
+ type: object
+ description: "defines metrics exporter options"
+ properties:
+ labels:
+ type: object
+ description: "defines metric labels options"
+ properties:
+ exclude:
+ type: array
+ description: |
+ When adding labels to a metric exclude labels with names from the following list
+ items:
+ type: string
+ status:
+ type: object
+ description: "defines status options"
+ properties:
+ fields:
+ type: object
+ description: "defines status fields options"
+ properties:
+ action:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator should fill status field 'action'"
+ actions:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator should fill status field 'actions'"
+ error:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator should fill status field 'error'"
+ errors:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator should fill status field 'errors'"
+ statefulSet:
+ type: object
+ description: "define StatefulSet-specific parameters"
+ properties:
+ revisionHistoryLimit:
+ type: integer
+ description: "revisionHistoryLimit is the maximum number of revisions that will be\nmaintained in the StatefulSet's revision history. \nLook details in `statefulset.spec.revisionHistoryLimit`\n"
+ pod:
+ type: object
+ description: "define pod specific parameters"
+ properties:
+ terminationGracePeriod:
+ type: integer
+ description: "Optional duration in seconds the pod needs to terminate gracefully. \nLook details in `pod.spec.terminationGracePeriodSeconds`\n"
+ logger:
+ type: object
+ description: "allow setup clickhouse-operator logger behavior"
+ properties:
+ logtostderr:
+ type: string
+ description: "boolean, allows logs to stderr"
+ alsologtostderr:
+ type: string
+ description: "boolean allows logs to stderr and files both"
+ v:
+ type: string
+ description: "verbosity level of clickhouse-operator log, default - 1 max - 9"
+ stderrthreshold:
+ type: string
+ vmodule:
+ type: string
+ description: |
+ Comma-separated list of filename=N, where filename (can be a pattern) must have no .go ext, and N is a V level.
+ Ex.: file*=2 sets the 'V' to 2 in all files with names like file*.
+ log_backtrace_at:
+ type: string
+ description: |
+ It can be set to a file and line number with a logging line.
+ Ex.: file.go:123
+ Each time when this line is being executed, a stack trace will be written to the Info log.
diff --git a/dev/generate_helm_chart.sh b/dev/generate_helm_chart.sh
index 1a6b25c5d..a31ebc6b2 100755
--- a/dev/generate_helm_chart.sh
+++ b/dev/generate_helm_chart.sh
@@ -75,6 +75,11 @@ function main() {
echo "WARNING"
echo "helm-docs is not available, skip docs generation"
fi
+
+# if [[ "0" == $(helm plugin list | grep -c schema) ]]; then
+# helm plugin install https://github.com/losisin/helm-values-schema-json.git
+# fi
+# helm schema --use-helm-docs -f "${values_yaml}" --output "${chart_path}/values.schema.json"
}
function process() {
@@ -200,7 +205,7 @@ function update_deployment_resource() {
yq e -i '.spec.template.metadata.annotations += {"{{ if .Values.podAnnotations }}{{ toYaml .Values.podAnnotations | nindent 8 }}{{ end }}": null}' "${file}"
yq e -i '.spec.template.spec.imagePullSecrets |= "{{ toYaml .Values.imagePullSecrets | nindent 8 }}"' "${file}"
yq e -i '.spec.template.spec.serviceAccountName |= "{{ include \"altinity-clickhouse-operator.serviceAccountName\" . }}"' "${file}"
- yq e -i '.spec.template.spec += {"{{- if .Values.operator.priorityClassName }}priorityClassName: {{ .Values.operator.priorityClassName | quote }}{{- end }}": null}' "${file}"
+ yq e -i '.spec.template.spec += {"{{ if .Values.operator.priorityClassName }}priorityClassName: {{ .Values.operator.priorityClassName | quote }}{{ end }}": null}' "${file}"
yq e -i '.spec.template.spec.nodeSelector |= "{{ toYaml .Values.nodeSelector | nindent 8 }}"' "${file}"
yq e -i '.spec.template.spec.affinity |= "{{ toYaml .Values.affinity | nindent 8 }}"' "${file}"
yq e -i '.spec.template.spec.tolerations |= "{{ toYaml .Values.tolerations | nindent 8 }}"' "${file}"
@@ -233,7 +238,7 @@ function update_deployment_resource() {
yq e -i '.spec.template.spec.containers[1].env += ["{{ with .Values.metrics.env }}{{ toYaml . | nindent 12 }}{{ end }}"]' "${file}"
perl -pi -e "s/'{{ if .Values.podAnnotations }}{{ toYaml .Values.podAnnotations \| nindent 8 }}{{ end }}': null/{{ if .Values.podAnnotations }}{{ toYaml .Values.podAnnotations \| nindent 8 }}{{ end }}/g" "${file}"
- perl -pi -e "s/'{{- if .Values.operator.priorityClassName }}priorityClassName: {{ .Values.operator.priorityClassName \| quote }}{{- end }}': null/{{- if .Values.operator.priorityClassName }}priorityClassName: {{ .Values.operator.priorityClassName | quote }}{{- end }}/g" "${file}"
+ perl -pi -e "s/'{{ if .Values.operator.priorityClassName }}priorityClassName: {{ .Values.operator.priorityClassName \| quote }}{{ end }}': null/{{ if .Values.operator.priorityClassName }}priorityClassName: {{ .Values.operator.priorityClassName | quote }}{{ end }}/g" "${file}"
perl -pi -e "s/- '{{ with .Values.operator.env }}{{ toYaml . \| nindent 12 }}{{ end }}'/{{ with .Values.operator.env }}{{ toYaml . \| nindent 12 }}{{ end }}/g" "${file}"
perl -pi -e "s/- '{{ with .Values.metrics.env }}{{ toYaml . \| nindent 12 }}{{ end }}'/{{ with .Values.metrics.env }}{{ toYaml . \| nindent 12 }}{{ end }}/g" "${file}"
perl -pi -e 's/(\s+\- name: metrics-exporter)/{{ if .Values.metrics.enabled }}\n$1/g' "${file}"
diff --git a/dev/go_build_config.sh b/dev/go_build_config.sh
index 8e3288dab..6474a3989 100755
--- a/dev/go_build_config.sh
+++ b/dev/go_build_config.sh
@@ -25,6 +25,8 @@ VERSION=$(cd "${SRC_ROOT}"; cat release)
GIT_SHA=$(cd "${SRC_ROOT}"; git rev-parse --short HEAD)
# 2020-03-07 14:54:56
NOW=$(date "+%FT%T")
+# Which version of golang to use. Ex.: 1.23.0
+GO_VERSION=$(cd "${SRC_ROOT}"; grep '^go ' go.mod | awk '{print $2}')
RELEASE="1"
diff --git a/dev/image_build_universal.sh b/dev/image_build_universal.sh
index 4245f3c07..f5e9c209f 100644
--- a/dev/image_build_universal.sh
+++ b/dev/image_build_universal.sh
@@ -87,7 +87,7 @@ else
fi
# Append VERSION and RELEASE
-DOCKER_CMD="${DOCKER_CMD} --build-arg VERSION=${VERSION:-dev}"
+DOCKER_CMD="${DOCKER_CMD} --build-arg VERSION=${VERSION:-dev} --build-arg GO_VERSION=${GO_VERSION}"
# Append GC flags if present
if [[ ! -z "${GCFLAGS}" ]]; then
diff --git a/dockerfile/metrics-exporter/Dockerfile b/dockerfile/metrics-exporter/Dockerfile
index 597e250f9..5b9cdae63 100644
--- a/dockerfile/metrics-exporter/Dockerfile
+++ b/dockerfile/metrics-exporter/Dockerfile
@@ -2,7 +2,9 @@
# ===== Builder =====
# ===================
-FROM --platform=${BUILDPLATFORM} golang:1.23 AS builder
+ARG GO_VERSION
+
+FROM --platform=${BUILDPLATFORM} golang:${GO_VERSION:-latest} AS builder
ARG TARGETOS
ARG TARGETARCH
@@ -34,7 +36,7 @@ RUN METRICS_EXPORTER_BIN=/tmp/metrics-exporter bash -xe ./dev/go_build_metrics_e
# ===================
# == Delve builder ==
# ===================
-FROM --platform=${BUILDPLATFORM} golang:1.23 AS delve-builder
+FROM --platform=${BUILDPLATFORM} golang:${GO_VERSION:-latest} AS delve-builder
RUN CGO_ENABLED=0 GO111MODULE=on GOOS="${TARGETOS}" GOARCH="${TARGETARCH}" \
go install -ldflags "-s -w -extldflags '-static'" github.com/go-delve/delve/cmd/dlv@latest && \
rm -rf /root/.cache/go-build/ /go/pkg/mod/
diff --git a/dockerfile/operator/Dockerfile b/dockerfile/operator/Dockerfile
index 9be1d8211..e2f2ae4ff 100644
--- a/dockerfile/operator/Dockerfile
+++ b/dockerfile/operator/Dockerfile
@@ -2,7 +2,9 @@
# ===== Builder =====
# ===================
-FROM --platform=${BUILDPLATFORM} golang:1.23 AS builder
+ARG GO_VERSION
+
+FROM --platform=${BUILDPLATFORM} golang:${GO_VERSION:-latest} AS builder
ARG TARGETOS
ARG TARGETARCH
@@ -34,7 +36,7 @@ RUN OPERATOR_BIN=/tmp/clickhouse-operator bash -xe ./dev/go_build_operator.sh
# ===================
# == Delve builder ==
# ===================
-FROM --platform=${BUILDPLATFORM} golang:1.23 AS delve-builder
+FROM --platform=${BUILDPLATFORM} golang:${GO_VERSION:-latest} AS delve-builder
RUN CGO_ENABLED=0 GO111MODULE=on GOOS="${TARGETOS}" GOARCH="${TARGETARCH}" \
go install -ldflags "-s -w -extldflags '-static'" github.com/go-delve/delve/cmd/dlv@latest && \
rm -rf /root/.cache/go-build/ /go/pkg/mod/
diff --git a/docs/chi-examples/14-zones-distribution-01.yaml b/docs/chi-examples/14-zones-distribution-01.yaml
index c2035b05a..318a38b30 100644
--- a/docs/chi-examples/14-zones-distribution-01.yaml
+++ b/docs/chi-examples/14-zones-distribution-01.yaml
@@ -19,7 +19,7 @@ spec:
podDistribution:
- type: ClickHouseAntiAffinity
scope: ClickHouseInstallation
- - name: clickhouse-in-zone-us-east-1f
+ - name: clickhouse-in-zone-us-east-1c
zone:
values:
- "us-east-1c"
diff --git a/docs/chi-examples/70-chop-config.yaml b/docs/chi-examples/70-chop-config.yaml
index 323773bee..a00ca3ffe 100644
--- a/docs/chi-examples/70-chop-config.yaml
+++ b/docs/chi-examples/70-chop-config.yaml
@@ -10,9 +10,12 @@ spec:
################################################
watch:
# List of namespaces where clickhouse-operator watches for events.
- # Concurrently running operators should watch on different namespaces
- #namespaces: ["dev", "test"]
- namespaces: []
+ # Concurrently running operators should watch on different namespaces.
+ # IMPORTANT
+ # Regexp is applicable.
+ namespaces:
+ include: []
+ exclude: []
clickhouse:
configuration:
@@ -95,40 +98,85 @@ spec:
##
################################################
reconcile:
+ # Reconcile runtime settings
runtime:
# Max number of concurrent CHI reconciles in progress
reconcileCHIsThreadsNumber: 10
- # Max number of concurrent shard reconciles in progress
- reconcileShardsThreadsNumber: 1
- # The maximum percentage of cluster shards that may be reconciled in parallel
+
+ # The operator reconciles shards concurrently in each CHI with the following limitations:
+ # 1. Number of shards being reconciled (and thus having hosts down) in each CHI concurrently
+ # can not be greater than 'reconcileShardsThreadsNumber'.
+ # 2. Percentage of shards being reconciled (and thus having hosts down) in each CHI concurrently
+ # can not be greater than 'reconcileShardsMaxConcurrencyPercent'.
+ # 3. The first shard is always reconciled alone. Concurrency starts from the second shard and onward.
+ # Thus limiting number of shards being reconciled (and thus having hosts down) in each CHI by both number and percentage
+
+ # Max number of concurrent shard reconciles within one cluster in progress
+ reconcileShardsThreadsNumber: 5
+ # Max percentage of concurrent shard reconciles within one cluster in progress
reconcileShardsMaxConcurrencyPercent: 50
+ # Reconcile StatefulSet scenario
statefulSet:
+ # Create StatefulSet scenario
create:
- # What to do in case created StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds
+ # What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds
# Possible options:
- # 1. abort - do nothing, just break the process and wait for admin
- # 2. delete - delete newly created problematic StatefulSet
- # 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet
+ # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
+ # do not try to fix or delete or update it, just abort reconcile cycle.
+ # Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
+ # 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ # 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
onFailure: ignore
+ # Update StatefulSet scenario
update:
- # How many seconds to wait for created/updated StatefulSet to be Ready
+ # How many seconds to wait for created/updated StatefulSet to be 'Ready'
timeout: 300
- # How many seconds to wait between checks for created/updated StatefulSet status
+ # How many seconds to wait between checks/polls for created/updated StatefulSet status
pollInterval: 5
- # What to do in case updated StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds
+ # What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds
# Possible options:
- # 1. abort - do nothing, just break the process and wait for admin
+ # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
+ # do not try to fix or delete or update it, just abort reconcile cycle.
+ # Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
# 2. rollback - delete Pod and rollback StatefulSet to previous Generation.
- # Pod would be recreated by StatefulSet based on rollback-ed configuration
- # 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet
- onFailure: rollback
+ # Pod would be recreated by StatefulSet based on rollback-ed StatefulSet configuration.
+ # Follow 'abort' path afterwards.
+ # 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ onFailure: abort
+ # Reconcile Host scenario
host:
+ # Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ # - to be excluded from a ClickHouse cluster
+ # - to complete all running queries
+ # - to be included into a ClickHouse cluster
+ # respectively, before moving forward with host reconcile
wait:
- exclude: "true"
- include: "false"
+ exclude: "true"
+ queries: "true"
+ include: "false"
+ # The operator during reconcile procedure should wait for replicas to catch-up
+ # replication delay a.k.a replication lag for the following replicas
+ replicas:
+ # All replicas (new and known earlier) are explicitly requested to wait for replication to catch-up
+ all: "no"
+ # New replicas only are requested to wait for replication to catch-up
+ new: "yes"
+ # Replication catch-up is considered to be completed as soon as replication delay
+ # a.k.a replication lag - calculated as "MAX(absolute_delay) FROM system.replicas"
+ # is within this specified delay (in seconds)
+ delay: 10
+ probes:
+ # Whether the operator during host launch procedure should wait for startup probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+ # Default option value is to not wait.
+ startup: "no"
+ # Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+ # Default option value is to wait.
+ readiness: "yes"
################################################
##
diff --git a/docs/chi-examples/99-clickhouseinstallation-max.yaml b/docs/chi-examples/99-clickhouseinstallation-max.yaml
index cc0a5ee2f..f0a8e4117 100644
--- a/docs/chi-examples/99-clickhouseinstallation-max.yaml
+++ b/docs/chi-examples/99-clickhouseinstallation-max.yaml
@@ -58,7 +58,7 @@ spec:
# autoPurge: "yes"
# Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side
- reconciling:
+ reconcile:
# DISCUSSED TO BE DEPRECATED
# Syntax sugar
# Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config
@@ -104,6 +104,37 @@ spec:
# Max percentage of concurrent shard reconciles within one cluster in progress
reconcileShardsMaxConcurrencyPercent: 50
+ host:
+ # Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ # - to be excluded from a ClickHouse cluster
+ # - to complete all running queries
+ # - to be included into a ClickHouse cluster
+ # respectively, before moving forward with host reconcile
+ wait:
+ exclude: "true"
+ queries: "true"
+ include: "false"
+ # The operator during reconcile procedure should wait for replicas to catch-up
+ # replication delay a.k.a replication lag for the following replicas
+ replicas:
+ # All replicas (new and known earlier) are explicitly requested to wait for replication to catch-up
+ all: "no"
+ # New replicas only are requested to wait for replication to catch-up
+ new: "yes"
+ # Replication catch-up is considered to be completed as soon as replication delay
+ # a.k.a replication lag - calculated as "MAX(absolute_delay) FROM system.replicas"
+ # is within this specified delay (in seconds)
+ delay: 10
+ probes:
+ # Whether the operator during host launch procedure should wait for startup probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+ # Default option value is to not wait.
+ startup: "no"
+ # Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+ # Default option value is to wait.
+ readiness: "yes"
+
macros:
sections:
users:
@@ -240,13 +271,44 @@ spec:
shardsCount: 3
replicasCount: 2
reconcile:
- # Overwrites reconcile.runtime from the operator's config
+ # Optional, overwrites reconcile.runtime from the operator's config
runtime:
# Max number of concurrent shard reconciles within one cluster in progress
reconcileShardsThreadsNumber: 5
# Max percentage of concurrent shard reconciles within one cluster in progress
reconcileShardsMaxConcurrencyPercent: 50
+ host:
+ # Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ # - to be excluded from a ClickHouse cluster
+ # - to complete all running queries
+ # - to be included into a ClickHouse cluster
+ # respectively, before moving forward with host reconcile
+ wait:
+ exclude: "true"
+ queries: "true"
+ include: "false"
+ # The operator during reconcile procedure should wait for replicas to catch-up
+ # replication delay a.k.a replication lag for the following replicas
+ replicas:
+ # All replicas (new and known earlier) are explicitly requested to wait for replication to catch-up
+ all: "no"
+ # New replicas only are requested to wait for replication to catch-up
+ new: "yes"
+ # Replication catch-up is considered to be completed as soon as replication delay
+ # a.k.a replication lag - calculated as "MAX(absolute_delay) FROM system.replicas"
+ # is within this specified delay (in seconds)
+ delay: 10
+ probes:
+ # Whether the operator during host launch procedure should wait for startup probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+ # Default option value is to not wait.
+ startup: "no"
+ # Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+ # Default option value is to wait.
+ readiness: "yes"
+
- name: shards-only
templates:
podTemplate: clickhouse-v23.8
diff --git a/go.mod b/go.mod
index c7e8221f1..a519eb44a 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
module github.com/altinity/clickhouse-operator
-go 1.23.0
+go 1.25.1
replace (
github.com/emicklei/go-restful/v3 => github.com/emicklei/go-restful/v3 v3.10.0
diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go
index 253b7ecb2..9f1c01e86 100644
--- a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go
+++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go
@@ -436,12 +436,12 @@ func (cr *ClickHouseKeeperInstallation) IsTroubleshoot() bool {
return false
}
-// GetReconciling gets reconciling spec
-func (cr *ClickHouseKeeperInstallation) GetReconciling() *apiChi.Reconciling {
+// GetReconcile gets reconcile spec
+func (cr *ClickHouseKeeperInstallation) GetReconcile() *apiChi.ChiReconcile {
if cr == nil {
return nil
}
- return cr.GetSpecT().Reconciling
+ return cr.GetSpecT().Reconcile
}
// Copy makes copy of a CHI, filtering fields according to specified CopyOptions
diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go
index 762436fb0..301287826 100644
--- a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go
+++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go
@@ -23,12 +23,13 @@ import (
type Cluster struct {
Name string `json:"name,omitempty" yaml:"name,omitempty"`
- Settings *apiChi.Settings `json:"settings,omitempty" yaml:"settings,omitempty"`
- Files *apiChi.Settings `json:"files,omitempty" yaml:"files,omitempty"`
- Templates *apiChi.TemplatesList `json:"templates,omitempty" yaml:"templates,omitempty"`
- Layout *ChkClusterLayout `json:"layout,omitempty" yaml:"layout,omitempty"`
- PDBManaged *types.StringBool `json:"pdbManaged,omitempty" yaml:"pdbManaged,omitempty"`
- PDBMaxUnavailable *types.Int32 `json:"pdbMaxUnavailable,omitempty" yaml:"pdbMaxUnavailable,omitempty"`
+ Settings *apiChi.Settings `json:"settings,omitempty" yaml:"settings,omitempty"`
+ Files *apiChi.Settings `json:"files,omitempty" yaml:"files,omitempty"`
+ Templates *apiChi.TemplatesList `json:"templates,omitempty" yaml:"templates,omitempty"`
+ Layout *ChkClusterLayout `json:"layout,omitempty" yaml:"layout,omitempty"`
+ PDBManaged *types.StringBool `json:"pdbManaged,omitempty" yaml:"pdbManaged,omitempty"`
+ PDBMaxUnavailable *types.Int32 `json:"pdbMaxUnavailable,omitempty" yaml:"pdbMaxUnavailable,omitempty"`
+ Reconcile apiChi.ClusterReconcile `json:"reconcile" yaml:"reconcile"`
Runtime ChkClusterRuntime `json:"-" yaml:"-"`
}
@@ -125,11 +126,6 @@ func (c *Cluster) GetSecret() *apiChi.ClusterSecret {
return nil
}
-// GetRuntime is a getter
-func (cluster *Cluster) GetRuntime() apiChi.IClusterRuntime {
- return &cluster.Runtime
-}
-
// GetPDBManaged is a getter
func (cluster *Cluster) GetPDBManaged() *types.StringBool {
return cluster.PDBManaged
@@ -140,6 +136,16 @@ func (cluster *Cluster) GetPDBMaxUnavailable() *types.Int32 {
return cluster.PDBMaxUnavailable
}
+// GetReconcile is a getter
+func (cluster *Cluster) GetReconcile() apiChi.ClusterReconcile {
+ return cluster.Reconcile
+}
+
+// GetRuntime is a getter
+func (cluster *Cluster) GetRuntime() apiChi.IClusterRuntime {
+ return &cluster.Runtime
+}
+
// FillShardsReplicasExplicitlySpecified fills whether shard or replicas are explicitly specified
func (cluster *Cluster) FillShardsReplicasExplicitlySpecified() {
if len(cluster.Layout.Shards) > 0 {
diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_spec.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_spec.go
index b649262fb..28d15e41b 100644
--- a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_spec.go
+++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_spec.go
@@ -21,13 +21,14 @@ import (
// ChkSpec defines spec section of ClickHouseKeeper resource
type ChkSpec struct {
- TaskID *types.Id `json:"taskID,omitempty" yaml:"taskID,omitempty"`
- NamespaceDomainPattern *types.String `json:"namespaceDomainPattern,omitempty" yaml:"namespaceDomainPattern,omitempty"`
- Suspend *types.StringBool `json:"suspend,omitempty" yaml:"suspend,omitempty"`
- Reconciling *apiChi.Reconciling `json:"reconciling,omitempty" yaml:"reconciling,omitempty"`
- Defaults *apiChi.Defaults `json:"defaults,omitempty" yaml:"defaults,omitempty"`
- Configuration *Configuration `json:"configuration,omitempty" yaml:"configuration,omitempty"`
- Templates *apiChi.Templates `json:"templates,omitempty" yaml:"templates,omitempty"`
+ TaskID *types.Id `json:"taskID,omitempty" yaml:"taskID,omitempty"`
+ NamespaceDomainPattern *types.String `json:"namespaceDomainPattern,omitempty" yaml:"namespaceDomainPattern,omitempty"`
+ Suspend *types.StringBool `json:"suspend,omitempty" yaml:"suspend,omitempty"`
+ Reconciling *apiChi.ChiReconcile `json:"reconciling,omitempty" yaml:"reconciling,omitempty"`
+ Reconcile *apiChi.ChiReconcile `json:"reconcile,omitempty" yaml:"reconcile,omitempty"`
+ Defaults *apiChi.Defaults `json:"defaults,omitempty" yaml:"defaults,omitempty"`
+ Configuration *Configuration `json:"configuration,omitempty" yaml:"configuration,omitempty"`
+ Templates *apiChi.Templates `json:"templates,omitempty" yaml:"templates,omitempty"`
}
// HasTaskID checks whether task id is specified
@@ -107,7 +108,7 @@ func (spec *ChkSpec) MergeFrom(from *ChkSpec, _type apiChi.MergeType) {
}
}
- spec.Reconciling = spec.Reconciling.MergeFrom(from.Reconciling, _type)
+ spec.Reconcile = spec.Reconcile.MergeFrom(from.Reconcile, _type)
spec.Defaults = spec.Defaults.MergeFrom(from.Defaults, _type)
spec.Configuration = spec.Configuration.MergeFrom(from.Configuration, _type)
spec.Templates = spec.Templates.MergeFrom(from.Templates, _type)
diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go
index 8760a9d37..645b11f29 100644
--- a/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go
@@ -300,7 +300,12 @@ func (in *ChkSpec) DeepCopyInto(out *ChkSpec) {
}
if in.Reconciling != nil {
in, out := &in.Reconciling, &out.Reconciling
- *out = new(clickhousealtinitycomv1.Reconciling)
+ *out = new(clickhousealtinitycomv1.ChiReconcile)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Reconcile != nil {
+ in, out := &in.Reconcile, &out.Reconcile
+ *out = new(clickhousealtinitycomv1.ChiReconcile)
(*in).DeepCopyInto(*out)
}
if in.Defaults != nil {
@@ -466,6 +471,7 @@ func (in *Cluster) DeepCopyInto(out *Cluster) {
*out = new(types.Int32)
**out = **in
}
+ in.Reconcile.DeepCopyInto(&out.Reconcile)
in.Runtime.DeepCopyInto(&out.Runtime)
return
}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/interface.go b/pkg/apis/clickhouse.altinity.com/v1/interface.go
index 6876c0376..a66261dbb 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/interface.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/interface.go
@@ -31,7 +31,7 @@ type ICustomResource interface {
GetSpec() ICRSpec
GetRuntime() ICustomResourceRuntime
GetRootServiceTemplates() ([]*ServiceTemplate, bool)
- GetReconciling() *Reconciling
+ GetReconcile() *ChiReconcile
WalkClusters(f func(cluster ICluster) error) []error
WalkHosts(func(host *Host) error) []error
@@ -131,6 +131,7 @@ type ICluster interface {
SelectSettingsSourceFrom(shard IShard, replica IReplica) any
GetRuntime() IClusterRuntime
+ GetReconcile() ClusterReconcile
GetServiceTemplate() (*ServiceTemplate, bool)
GetAncestor() ICluster
}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go
index 069d75475..0edef3cd0 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go
@@ -464,12 +464,12 @@ func (cr *ClickHouseInstallation) IsTroubleshoot() bool {
return cr.GetSpecT().GetTroubleshoot().Value()
}
-// GetReconciling gets reconciling spec
-func (cr *ClickHouseInstallation) GetReconciling() *Reconciling {
+// GetReconcile gets reconcile spec
+func (cr *ClickHouseInstallation) GetReconcile() *ChiReconcile {
if cr == nil {
return nil
}
- return cr.GetSpecT().Reconciling
+ return cr.GetSpecT().Reconcile
}
// Copy makes copy of a CHI, filtering fields according to specified CopyOptions
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go b/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go
index 836f91007..cebf8a6ec 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go
@@ -37,37 +37,6 @@ type Cluster struct {
Runtime ChiClusterRuntime `json:"-" yaml:"-"`
}
-type ClusterReconcile struct {
- Runtime ReconcileRuntime `json:"runtime" yaml:"runtime"`
-}
-
-type ReconcileRuntime struct {
- ReconcileShardsThreadsNumber int `json:"reconcileShardsThreadsNumber,omitempty" yaml:"reconcileShardsThreadsNumber,omitempty"`
- ReconcileShardsMaxConcurrencyPercent int `json:"reconcileShardsMaxConcurrencyPercent,omitempty" yaml:"reconcileShardsMaxConcurrencyPercent,omitempty"`
-}
-
-func (r ReconcileRuntime) MergeFrom(from ReconcileRuntime, _type MergeType) ReconcileRuntime {
- switch _type {
- case MergeTypeFillEmptyValues:
- if r.ReconcileShardsThreadsNumber == 0 {
- r.ReconcileShardsThreadsNumber = from.ReconcileShardsThreadsNumber
- }
- if r.ReconcileShardsMaxConcurrencyPercent == 0 {
- r.ReconcileShardsMaxConcurrencyPercent = from.ReconcileShardsMaxConcurrencyPercent
- }
- case MergeTypeOverrideByNonEmptyValues:
- if from.ReconcileShardsThreadsNumber != 0 {
- // Override by non-empty values only
- r.ReconcileShardsThreadsNumber = from.ReconcileShardsThreadsNumber
- }
- if from.ReconcileShardsMaxConcurrencyPercent != 0 {
- // Override by non-empty values only
- r.ReconcileShardsMaxConcurrencyPercent = from.ReconcileShardsMaxConcurrencyPercent
- }
- }
- return r
-}
-
type ChiClusterRuntime struct {
Address ChiClusterAddress `json:"-" yaml:"-"`
CHI *ClickHouseInstallation `json:"-" yaml:"-" testdiff:"ignore"`
@@ -166,11 +135,6 @@ func (c *Cluster) GetSecret() *ClusterSecret {
return c.Secret
}
-// GetRuntime is a getter
-func (cluster *Cluster) GetRuntime() IClusterRuntime {
- return &cluster.Runtime
-}
-
// GetPDBManaged is a getter
func (cluster *Cluster) GetPDBManaged() *types.StringBool {
return cluster.PDBManaged
@@ -181,6 +145,16 @@ func (cluster *Cluster) GetPDBMaxUnavailable() *types.Int32 {
return cluster.PDBMaxUnavailable
}
+// GetReconcile is a getter
+func (cluster *Cluster) GetReconcile() ClusterReconcile {
+ return cluster.Reconcile
+}
+
+// GetRuntime is a getter
+func (cluster *Cluster) GetRuntime() IClusterRuntime {
+ return &cluster.Runtime
+}
+
// FillShardsReplicasExplicitlySpecified fills whether shard or replicas are explicitly specified
func (cluster *Cluster) FillShardsReplicasExplicitlySpecified() {
if len(cluster.Layout.Shards) > 0 {
@@ -254,12 +228,13 @@ func (cluster *Cluster) InheritFilesFrom(chi *ClickHouseInstallation) {
})
}
-// InheritReconcileFrom inherits reconcile runtime from CHI
-func (cluster *Cluster) InheritReconcileFrom(chi *ClickHouseInstallation) {
- if chi.Spec.Reconciling == nil {
+// InheritClusterReconcileFrom inherits reconcile runtime from CHI
+func (cluster *Cluster) InheritClusterReconcileFrom(chi *ClickHouseInstallation) {
+ if chi.Spec.Reconcile == nil {
return
}
- cluster.Reconcile.Runtime = cluster.Reconcile.Runtime.MergeFrom(chi.Spec.Reconciling.Runtime, MergeTypeFillEmptyValues)
+ cluster.Reconcile.Runtime = cluster.Reconcile.Runtime.MergeFrom(chi.Spec.Reconcile.Runtime, MergeTypeFillEmptyValues)
+ cluster.Reconcile.Host = cluster.Reconcile.Host.MergeFrom(chi.Spec.Reconcile.Host)
}
// InheritTemplatesFrom inherits templates from CHI
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go b/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go
index 552899d0f..57c8cef34 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go
@@ -168,7 +168,36 @@ type OperatorConfigRuntime struct {
// OperatorConfigWatch specifies watch section
type OperatorConfigWatch struct {
// Namespaces where operator watches for events
- Namespaces []string `json:"namespaces" yaml:"namespaces"`
+ Namespaces OperatorConfigWatchNamespaces `json:"namespaces" yaml:"namespaces"`
+}
+
+type OperatorConfigWatchNamespaces struct {
+	Include *types.Strings `json:"include" yaml:"include"`
+	Exclude *types.Strings `json:"exclude" yaml:"exclude"`
+}
+
+func (n *OperatorConfigWatchNamespaces) UnmarshalJSON(data []byte) error {
+ type OperatorConfigWatchNamespaces2 OperatorConfigWatchNamespaces
+ var namespaces OperatorConfigWatchNamespaces2
+ if err := json.Unmarshal(data, &namespaces); err == nil {
+ s := OperatorConfigWatchNamespaces{
+ Include: namespaces.Include,
+ Exclude: namespaces.Exclude,
+ }
+ *n = s
+ return nil
+ }
+
+ var sl []string
+ if err := json.Unmarshal(data, &sl); err == nil {
+ s := OperatorConfigWatchNamespaces{
+ Include: types.NewStrings(sl),
+ }
+ *n = s
+ return nil
+ }
+
+	return fmt.Errorf("unable to unmarshal OperatorConfigWatchNamespaces")
}
// OperatorConfigConfig specifies Config section
@@ -385,14 +414,7 @@ type OperatorConfigCHIRuntime struct {
// OperatorConfigReconcile specifies reconcile section
type OperatorConfigReconcile struct {
- Runtime struct {
- ReconcileCHIsThreadsNumber int `json:"reconcileCHIsThreadsNumber" yaml:"reconcileCHIsThreadsNumber"`
- ReconcileShardsThreadsNumber int `json:"reconcileShardsThreadsNumber" yaml:"reconcileShardsThreadsNumber"`
- ReconcileShardsMaxConcurrencyPercent int `json:"reconcileShardsMaxConcurrencyPercent" yaml:"reconcileShardsMaxConcurrencyPercent"`
-
- // DEPRECATED, is replaced with reconcileCHIsThreadsNumber
- ThreadsNumber int `json:"threadsNumber" yaml:"threadsNumber"`
- } `json:"runtime" yaml:"runtime"`
+ Runtime OperatorConfigReconcileRuntime `json:"runtime" yaml:"runtime"`
StatefulSet struct {
Create struct {
@@ -406,28 +428,140 @@ type OperatorConfigReconcile struct {
} `json:"update" yaml:"update"`
} `json:"statefulSet" yaml:"statefulSet"`
- Host OperatorConfigReconcileHost `json:"host" yaml:"host"`
+ Host ReconcileHost `json:"host" yaml:"host"`
+}
+
+type OperatorConfigReconcileRuntime struct {
+ ReconcileCHIsThreadsNumber int `json:"reconcileCHIsThreadsNumber" yaml:"reconcileCHIsThreadsNumber"`
+ ReconcileShardsThreadsNumber int `json:"reconcileShardsThreadsNumber" yaml:"reconcileShardsThreadsNumber"`
+ ReconcileShardsMaxConcurrencyPercent int `json:"reconcileShardsMaxConcurrencyPercent" yaml:"reconcileShardsMaxConcurrencyPercent"`
+
+ // DEPRECATED, is replaced with reconcileCHIsThreadsNumber
+ ThreadsNumber int `json:"threadsNumber" yaml:"threadsNumber"`
+}
+
+// ReconcileHost defines reconcile host config
+type ReconcileHost struct {
+ Wait ReconcileHostWait `json:"wait" yaml:"wait"`
}
-// OperatorConfigReconcileHost defines reconcile host config
-type OperatorConfigReconcileHost struct {
- Wait OperatorConfigReconcileHostWait `json:"wait" yaml:"wait"`
+func (rh ReconcileHost) Normalize() ReconcileHost {
+ rh.Wait = rh.Wait.Normalize()
+ return rh
}
-// OperatorConfigReconcileHostWait defines reconcile host wait config
-type OperatorConfigReconcileHostWait struct {
- Exclude *types.StringBool `json:"exclude,omitempty" yaml:"exclude,omitempty"`
- Queries *types.StringBool `json:"queries,omitempty" yaml:"queries,omitempty"`
- Include *types.StringBool `json:"include,omitempty" yaml:"include,omitempty"`
- Replicas *OperatorConfigReconcileHostWaitReplicas `json:"replicas,omitempty" yaml:"replicas,omitempty"`
+func (rh ReconcileHost) MergeFrom(from ReconcileHost) ReconcileHost {
+ rh.Wait = rh.Wait.MergeFrom(from.Wait)
+ return rh
}
-type OperatorConfigReconcileHostWaitReplicas struct {
+// ReconcileHostWait defines reconcile host wait config
+type ReconcileHostWait struct {
+ Exclude *types.StringBool `json:"exclude,omitempty" yaml:"exclude,omitempty"`
+ Queries *types.StringBool `json:"queries,omitempty" yaml:"queries,omitempty"`
+ Include *types.StringBool `json:"include,omitempty" yaml:"include,omitempty"`
+ Replicas *ReconcileHostWaitReplicas `json:"replicas,omitempty" yaml:"replicas,omitempty"`
+ Probes *ReconcileHostWaitProbes `json:"probes,omitempty" yaml:"probes,omitempty"`
+}
+
+func (wait ReconcileHostWait) Normalize() ReconcileHostWait {
+ if wait.Replicas == nil {
+ wait.Replicas = &ReconcileHostWaitReplicas{}
+ }
+
+ if wait.Replicas.Delay == nil {
+ // Default update timeout in seconds
+ wait.Replicas.Delay = types.NewInt32(defaultMaxReplicationDelay)
+ }
+
+ if wait.Probes == nil {
+ // Default value
+ wait.Probes = &ReconcileHostWaitProbes{
+ Readiness: types.NewStringBool(true),
+ }
+ }
+
+ return wait
+}
+
+func (wait ReconcileHostWait) MergeFrom(from ReconcileHostWait) ReconcileHostWait {
+ wait.Exclude = wait.Exclude.MergeFrom(from.Exclude)
+ wait.Queries = wait.Queries.MergeFrom(from.Queries)
+ wait.Include = wait.Include.MergeFrom(from.Include)
+ wait.Replicas = wait.Replicas.MergeFrom(from.Replicas)
+ wait.Probes = wait.Probes.MergeFrom(from.Probes)
+
+ return wait
+}
+
+type ReconcileHostWaitReplicas struct {
All *types.StringBool `json:"all,omitempty" yaml:"all,omitempty"`
New *types.StringBool `json:"new,omitempty" yaml:"new,omitempty"`
Delay *types.Int32 `json:"delay,omitempty" yaml:"delay,omitempty"`
}
+func (r *ReconcileHostWaitReplicas) MergeFrom(from *ReconcileHostWaitReplicas) *ReconcileHostWaitReplicas {
+ if from == nil {
+ // Nothing to merge from, keep original value
+ return r
+ }
+
+ // From now on we have `from` specified
+
+ if r == nil {
+ // Recipient is not specified, just use `from` value
+ return from
+ }
+
+ // Both recipient and `from` are specified, need to walk over fields
+
+ r.All = r.All.MergeFrom(from.All)
+ r.New = r.New.MergeFrom(from.New)
+ r.Delay = r.Delay.MergeFrom(from.Delay)
+
+ return r
+}
+
+type ReconcileHostWaitProbes struct {
+ Startup *types.StringBool `json:"startup,omitempty" yaml:"startup,omitempty"`
+ Readiness *types.StringBool `json:"readiness,omitempty" yaml:"readiness,omitempty"`
+}
+
+func (p *ReconcileHostWaitProbes) GetStartup() *types.StringBool {
+ if p == nil {
+ return nil
+ }
+ return p.Startup
+}
+
+func (p *ReconcileHostWaitProbes) GetReadiness() *types.StringBool {
+ if p == nil {
+ return nil
+ }
+ return p.Readiness
+}
+
+func (p *ReconcileHostWaitProbes) MergeFrom(from *ReconcileHostWaitProbes) *ReconcileHostWaitProbes {
+ if from == nil {
+ // Nothing to merge from, keep original value
+ return p
+ }
+
+ // From now on we have `from` specified
+
+ if p == nil {
+ // Recipient is not specified, just use `from` value
+ return from
+ }
+
+ // Both recipient and `from` are specified, need to walk over fields
+
+ p.Startup = p.Startup.MergeFrom(from.Startup)
+ p.Readiness = p.Readiness.MergeFrom(from.Readiness)
+
+ return p
+}
+
// OperatorConfigAnnotation specifies annotation section
type OperatorConfigAnnotation struct {
// When transferring annotations from the chi/chit.metadata to CHI objects, use these filters.
@@ -837,15 +971,7 @@ func (c *OperatorConfig) normalizeSectionReconcileStatefulSet() {
}
func (c *OperatorConfig) normalizeSectionReconcileHost() {
- // Timeouts
- if c.Reconcile.Host.Wait.Replicas == nil {
- c.Reconcile.Host.Wait.Replicas = &OperatorConfigReconcileHostWaitReplicas{}
- }
-
- if c.Reconcile.Host.Wait.Replicas.Delay == nil {
- // Default update timeout in seconds
- c.Reconcile.Host.Wait.Replicas.Delay = types.NewInt32(defaultMaxReplicationDelay)
- }
+ c.Reconcile.Host = c.Reconcile.Host.Normalize()
}
func (c *OperatorConfig) normalizeSectionClickHouseConfigurationUserDefault() {
@@ -1003,47 +1129,61 @@ func (c *OperatorConfig) normalize() {
func (c *OperatorConfig) applyEnvVarParams() {
if ns := os.Getenv(deployment.WATCH_NAMESPACE); len(ns) > 0 {
// We have WATCH_NAMESPACE explicitly specified
- c.Watch.Namespaces = []string{ns}
+ c.Watch.Namespaces.Include = types.NewStrings([]string{ns})
}
if nss := os.Getenv(deployment.WATCH_NAMESPACES); len(nss) > 0 {
// We have WATCH_NAMESPACES explicitly specified
- namespaces := strings.FieldsFunc(nss, func(r rune) bool {
- return r == ':' || r == ','
- })
- c.Watch.Namespaces = []string{}
- for i := range namespaces {
- if len(namespaces[i]) > 0 {
- c.Watch.Namespaces = append(c.Watch.Namespaces, namespaces[i])
- }
+ if namespaces := c.splitNamespaces(nss); len(namespaces) > 0 {
+ c.Watch.Namespaces.Include = types.NewStrings(namespaces)
}
}
+
+ if nss := os.Getenv(deployment.WATCH_NAMESPACES_EXCLUDE); len(nss) > 0 {
+ // We have WATCH_NAMESPACES_EXCLUDE explicitly specified
+ if namespaces := c.splitNamespaces(nss); len(namespaces) > 0 {
+ c.Watch.Namespaces.Exclude = types.NewStrings(namespaces)
+ }
+ }
+}
+
+func (c *OperatorConfig) splitNamespaces(combined string) (namespaces []string) {
+ candidates := strings.FieldsFunc(combined, func(r rune) bool {
+ return r == ':' || r == ','
+ })
+ for _, str := range candidates {
+ candidate := strings.TrimSpace(str)
+ if len(candidate) > 0 {
+ namespaces = append(namespaces, candidate)
+ }
+ }
+ return namespaces
}
// applyDefaultWatchNamespace applies default watch namespace in case none specified earlier
func (c *OperatorConfig) applyDefaultWatchNamespace() {
- // In case we have watched namespaces specified, all is fine
- // In case we do not have watched namespaces specified, we need to decide, what namespace to watch.
+ // In case we have watched namespaces specified explicitly, all is fine
+ // In case we do not have watched namespaces specified, we need to decide, what namespace(s) to watch.
// In this case, there are two options:
- // 1. Operator runs in kube-system namespace - assume this is global installation, need to watch ALL namespaces
+ // 1. Operator runs in 'kube-system' namespace - assume this is global installation, need to watch ALL namespaces
// 2. Operator runs in other (non kube-system) namespace - assume this is local installation, watch this namespace only
- // Watch in own namespace only in case no other specified earlier
+ // Main idea is to watch in own namespace only in case no other specified explicitly and non-global deploy (not in 'kube-system')
- if len(c.Watch.Namespaces) > 0 {
- // We have namespace(s) specified already
+ if c.Watch.Namespaces.Include.HasValue() {
+ // We have namespace(s) explicitly specified already, all is good
return
}
- // No namespaces specified
+ // No namespace(s) specified explicitly, need to infer
- if c.Runtime.Namespace == "kube-system" {
+ if c.Runtime.Namespace == meta.NamespaceSystem {
// Operator is running in system namespace
// Do nothing, we already have len(config.WatchNamespaces) == 0
} else {
- // Operator is running is explicit namespace. Watch in it
- c.Watch.Namespaces = []string{
+ // Operator is running inside a namespace. Watch in it
+ c.Watch.Namespaces.Include = types.NewStrings([]string{
c.Runtime.Namespace,
- }
+ })
}
}
@@ -1119,15 +1259,42 @@ func (c *OperatorConfig) copyWithHiddenCredentials() *OperatorConfig {
return conf
}
-// IsWatchedNamespace returns whether specified namespace is in a list of watched
+// IsNamespaceWatched returns whether specified namespace is in a list of watched
// TODO unify with GetInformerNamespace
-func (c *OperatorConfig) IsWatchedNamespace(namespace string) bool {
- // In case no namespaces specified - watch all namespaces
- if len(c.Watch.Namespaces) == 0 {
+func (c *OperatorConfig) IsNamespaceWatched(namespace string) bool {
+ return c.isNamespaceIncluded(namespace) && !c.isNamespaceExcluded(namespace)
+}
+
+func (c *OperatorConfig) isNamespaceIncluded(namespace string) bool {
+ switch {
+ // In case no included namespaces specified - consider all namespaces included
+ case !c.Watch.Namespaces.Include.HasValue():
return true
+
+ // In case matches any included namespaces regexp specified - consider it included
+ case c.Watch.Namespaces.Include.Match(namespace):
+ return true
+
+ // No match to explicitly specified set of namespace regexp - not included
+ default:
+ return false
}
+}
+
+func (c *OperatorConfig) isNamespaceExcluded(namespace string) bool {
+ switch {
+ // In case no excluded namespaces specified - consider not excluded
+ case !c.Watch.Namespaces.Exclude.HasValue():
+ return false
- return util.InArrayWithRegexp(namespace, c.Watch.Namespaces)
+ // In case matches any excluded namespaces regexp specified - consider it excluded
+ case c.Watch.Namespaces.Exclude.Match(namespace):
+ return true
+
+ // No match to explicitly specified set of namespace regexp - not excluded
+ default:
+ return false
+ }
}
// GetInformerNamespace is a TODO stub
@@ -1136,12 +1303,13 @@ func (c *OperatorConfig) IsWatchedNamespace(namespace string) bool {
// be it explicitly specified namespace or empty line for "all namespaces".
// That's what conflicts with CHOp's approach to 'specify list of namespaces to watch in', having
// slice of namespaces (CHOp's approach) incompatible with "one namespace name" approach
-// TODO unify with IsWatchedNamespace
+// TODO unify with IsNamespaceWatched
// TODO unify approaches to multiple namespaces support
func (c *OperatorConfig) GetInformerNamespace() string {
// Namespace where informers would watch notifications from
namespace := meta.NamespaceAll
- if len(c.Watch.Namespaces) == 1 {
+
+ if c.Watch.Namespaces.Include.Len() == 1 {
// We have exactly one watch namespace specified
// This scenario is implemented in go-client
// In any other case, just keep metav1.NamespaceAll
@@ -1149,8 +1317,8 @@ func (c *OperatorConfig) GetInformerNamespace() string {
// This contradicts current implementation of multiple namespaces in config's watchNamespaces field,
// but k8s has possibility to specify one/all namespaces only, no 'multiple namespaces' option
var labelRegexp = regexp.MustCompile("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$")
- if labelRegexp.MatchString(c.Watch.Namespaces[0]) {
- namespace = c.Watch.Namespaces[0]
+ if labelRegexp.MatchString(c.Watch.Namespaces.Include.First()) {
+ namespace = c.Watch.Namespaces.Include.First()
}
}
@@ -1182,7 +1350,7 @@ func (c *OperatorConfig) GetRevisionHistoryLimit() *int32 {
func (c *OperatorConfig) move() {
// WatchNamespaces where operator watches for events
if len(c.WatchNamespaces) > 0 {
- c.Watch.Namespaces = c.WatchNamespaces
+ c.Watch.Namespaces.Include = types.NewStrings(c.WatchNamespaces)
}
if c.CHCommonConfigsPath != "" {
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_reconcile.go b/pkg/apis/clickhouse.altinity.com/v1/type_reconcile.go
new file mode 100644
index 000000000..3cbbb0f91
--- /dev/null
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_reconcile.go
@@ -0,0 +1,192 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "strings"
+ "time"
+)
+
+// ChiReconcile defines reconcile specification
+type ChiReconcile struct {
+ // About to be DEPRECATED
+ Policy string `json:"policy,omitempty" yaml:"policy,omitempty"`
+
+ // ConfigMapPropagationTimeout specifies timeout for ConfigMap to propagate
+ ConfigMapPropagationTimeout int `json:"configMapPropagationTimeout,omitempty" yaml:"configMapPropagationTimeout,omitempty"`
+ // Cleanup specifies cleanup behavior
+ Cleanup *Cleanup `json:"cleanup,omitempty" yaml:"cleanup,omitempty"`
+ // Macros specifies macros application rules
+ Macros ReconcileMacros `json:"macros,omitempty" yaml:"macros,omitempty"`
+
+ // Runtime specifies runtime settings
+ Runtime ReconcileRuntime `json:"runtime,omitempty" yaml:"runtime,omitempty"`
+	// Host specifies host-level reconcile settings
+ Host ReconcileHost `json:"host" yaml:"host"`
+}
+
+type ClusterReconcile struct {
+ // Runtime specifies runtime settings
+ Runtime ReconcileRuntime `json:"runtime" yaml:"runtime"`
+	// Host specifies host-level reconcile settings
+ Host ReconcileHost `json:"host" yaml:"host"`
+}
+
+// NewChiReconcile creates new reconcile
+func NewChiReconcile() *ChiReconcile {
+ return new(ChiReconcile)
+}
+
+// MergeFrom merges from specified reconcile
+func (r *ChiReconcile) MergeFrom(from *ChiReconcile, _type MergeType) *ChiReconcile {
+ if from == nil {
+ return r
+ }
+
+ if r == nil {
+ r = NewChiReconcile()
+ }
+
+ switch _type {
+ case MergeTypeFillEmptyValues:
+ if r.Policy == "" {
+ r.Policy = from.Policy
+ }
+ if r.ConfigMapPropagationTimeout == 0 {
+ r.ConfigMapPropagationTimeout = from.ConfigMapPropagationTimeout
+ }
+ case MergeTypeOverrideByNonEmptyValues:
+ if from.Policy != "" {
+ // Override by non-empty values only
+ r.Policy = from.Policy
+ }
+ if from.ConfigMapPropagationTimeout != 0 {
+ // Override by non-empty values only
+ r.ConfigMapPropagationTimeout = from.ConfigMapPropagationTimeout
+ }
+ }
+
+ r.Cleanup = r.Cleanup.MergeFrom(from.Cleanup, _type)
+ r.Macros = r.Macros.MergeFrom(from.Macros, _type)
+ r.Runtime = r.Runtime.MergeFrom(from.Runtime, _type)
+ r.Host = r.Host.MergeFrom(from.Host)
+
+ return r
+}
+
+// SetDefaults set default values for reconcile
+func (r *ChiReconcile) SetDefaults() *ChiReconcile {
+ if r == nil {
+ return nil
+ }
+ r.Policy = ReconcilingPolicyUnspecified
+ r.ConfigMapPropagationTimeout = 10
+ r.Cleanup = NewCleanup().SetDefaults()
+ return r
+}
+
+// GetPolicy gets policy
+func (r *ChiReconcile) GetPolicy() string {
+ if r == nil {
+ return ""
+ }
+ return r.Policy
+}
+
+// SetPolicy sets policy
+func (r *ChiReconcile) SetPolicy(p string) {
+ if r == nil {
+ return
+ }
+ r.Policy = p
+}
+
+func (r *ChiReconcile) HasConfigMapPropagationTimeout() bool {
+ return r.GetConfigMapPropagationTimeout() > 0
+}
+
+// GetConfigMapPropagationTimeout gets config map propagation timeout
+func (r *ChiReconcile) GetConfigMapPropagationTimeout() int {
+ if r == nil {
+ return 0
+ }
+ return r.ConfigMapPropagationTimeout
+}
+
+// SetConfigMapPropagationTimeout sets config map propagation timeout
+func (r *ChiReconcile) SetConfigMapPropagationTimeout(timeout int) {
+ if r == nil {
+ return
+ }
+ r.ConfigMapPropagationTimeout = timeout
+}
+
+// GetConfigMapPropagationTimeoutDuration gets config map propagation timeout duration
+func (r *ChiReconcile) GetConfigMapPropagationTimeoutDuration() time.Duration {
+ if r == nil {
+ return 0
+ }
+ return time.Duration(r.GetConfigMapPropagationTimeout()) * time.Second
+}
+
+// Possible reconcile policy values
+const (
+ ReconcilingPolicyUnspecified = "unspecified"
+ ReconcilingPolicyWait = "wait"
+ ReconcilingPolicyNoWait = "nowait"
+)
+
+// IsReconcilingPolicyWait checks whether reconcile policy is "wait"
+func (r *ChiReconcile) IsReconcilingPolicyWait() bool {
+ return strings.ToLower(r.GetPolicy()) == ReconcilingPolicyWait
+}
+
+// IsReconcilingPolicyNoWait checks whether reconcile policy is "no wait"
+func (r *ChiReconcile) IsReconcilingPolicyNoWait() bool {
+ return strings.ToLower(r.GetPolicy()) == ReconcilingPolicyNoWait
+}
+
+// GetCleanup gets cleanup
+func (r *ChiReconcile) GetCleanup() *Cleanup {
+ if r == nil {
+ return nil
+ }
+ return r.Cleanup
+}
+
+// SetCleanup sets cleanup
+func (r *ChiReconcile) SetCleanup(cleanup *Cleanup) {
+ if r == nil {
+ return
+ }
+ r.Cleanup = cleanup
+}
+
+func (r *ChiReconcile) InheritRuntimeFrom(from OperatorConfigReconcileRuntime) {
+ if r == nil {
+ return
+ }
+
+ if r.Runtime.ReconcileShardsThreadsNumber == 0 {
+ r.Runtime.ReconcileShardsThreadsNumber = from.ReconcileShardsThreadsNumber
+ }
+ if r.Runtime.ReconcileShardsMaxConcurrencyPercent == 0 {
+ r.Runtime.ReconcileShardsMaxConcurrencyPercent = from.ReconcileShardsMaxConcurrencyPercent
+ }
+}
+
+func (r *ChiReconcile) InheritHostFrom(from ReconcileHost) {
+ r.Host = r.Host.MergeFrom(from)
+}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_reconcile_macros.go b/pkg/apis/clickhouse.altinity.com/v1/type_reconcile_macros.go
new file mode 100644
index 000000000..54263277f
--- /dev/null
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_reconcile_macros.go
@@ -0,0 +1,57 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+)
+
+type ReconcileMacros struct {
+ Sections MacrosSections `json:"sections,omitempty" yaml:"sections,omitempty"`
+}
+
+// MergeFrom merges from specified reconcile
+func (t ReconcileMacros) MergeFrom(from ReconcileMacros, _type MergeType) ReconcileMacros {
+ t.Sections = t.Sections.MergeFrom(from.Sections, _type)
+ return t
+}
+
+type MacrosSections struct {
+ Users MacrosSection `json:"users,omitempty" yaml:"users,omitempty"`
+ Profiles MacrosSection `json:"profiles,omitempty" yaml:"profiles,omitempty"`
+ Quotas MacrosSection `json:"quotas,omitempty" yaml:"quotas,omitempty"`
+ Settings MacrosSection `json:"settings,omitempty" yaml:"settings,omitempty"`
+ Files MacrosSection `json:"files,omitempty" yaml:"files,omitempty"`
+}
+
+// MergeFrom merges from specified macros
+func (t MacrosSections) MergeFrom(from MacrosSections, _type MergeType) MacrosSections {
+ t.Users = t.Users.MergeFrom(from.Users, _type)
+ t.Profiles = t.Profiles.MergeFrom(from.Profiles, _type)
+ t.Quotas = t.Quotas.MergeFrom(from.Quotas, _type)
+ t.Settings = t.Settings.MergeFrom(from.Settings, _type)
+ t.Files = t.Files.MergeFrom(from.Files, _type)
+ return t
+}
+
+type MacrosSection struct {
+ Enabled *types.StringBool `json:"enabled,omitempty" yaml:"enabled,omitempty"`
+}
+
+// MergeFrom merges from specified macros
+func (t MacrosSection) MergeFrom(from MacrosSection, _type MergeType) MacrosSection {
+ t.Enabled = t.Enabled.MergeFrom(from.Enabled)
+ return t
+}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_reconcile_runtime.go b/pkg/apis/clickhouse.altinity.com/v1/type_reconcile_runtime.go
new file mode 100644
index 000000000..f30dfcee7
--- /dev/null
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_reconcile_runtime.go
@@ -0,0 +1,42 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+type ReconcileRuntime struct {
+ ReconcileShardsThreadsNumber int `json:"reconcileShardsThreadsNumber,omitempty" yaml:"reconcileShardsThreadsNumber,omitempty"`
+ ReconcileShardsMaxConcurrencyPercent int `json:"reconcileShardsMaxConcurrencyPercent,omitempty" yaml:"reconcileShardsMaxConcurrencyPercent,omitempty"`
+}
+
+func (r ReconcileRuntime) MergeFrom(from ReconcileRuntime, _type MergeType) ReconcileRuntime {
+ switch _type {
+ case MergeTypeFillEmptyValues:
+ if r.ReconcileShardsThreadsNumber == 0 {
+ r.ReconcileShardsThreadsNumber = from.ReconcileShardsThreadsNumber
+ }
+ if r.ReconcileShardsMaxConcurrencyPercent == 0 {
+ r.ReconcileShardsMaxConcurrencyPercent = from.ReconcileShardsMaxConcurrencyPercent
+ }
+ case MergeTypeOverrideByNonEmptyValues:
+ if from.ReconcileShardsThreadsNumber != 0 {
+ // Override by non-empty values only
+ r.ReconcileShardsThreadsNumber = from.ReconcileShardsThreadsNumber
+ }
+ if from.ReconcileShardsMaxConcurrencyPercent != 0 {
+ // Override by non-empty values only
+ r.ReconcileShardsMaxConcurrencyPercent = from.ReconcileShardsMaxConcurrencyPercent
+ }
+ }
+ return r
+}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_reconciling.go b/pkg/apis/clickhouse.altinity.com/v1/type_reconciling.go
deleted file mode 100644
index 87b22f352..000000000
--- a/pkg/apis/clickhouse.altinity.com/v1/type_reconciling.go
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v1
-
-import (
- "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
- "strings"
- "time"
-)
-
-// Reconciling defines reconciling specification
-type Reconciling struct {
- // About to be DEPRECATED
- Policy string `json:"policy,omitempty" yaml:"policy,omitempty"`
- // ConfigMapPropagationTimeout specifies timeout for ConfigMap to propagate
- ConfigMapPropagationTimeout int `json:"configMapPropagationTimeout,omitempty" yaml:"configMapPropagationTimeout,omitempty"`
- // Cleanup specifies cleanup behavior
- Cleanup *Cleanup `json:"cleanup,omitempty" yaml:"cleanup,omitempty"`
- // Runtime specifies runtime settings
- Runtime ReconcileRuntime `json:"runtime,omitempty" yaml:"runtime,omitempty"`
- Macros Macros `json:"macros,omitempty" yaml:"macros,omitempty"`
-}
-
-type Macros struct {
- Sections MacrosSections `json:"sections,omitempty" yaml:"sections,omitempty"`
-}
-
-func newMacros() *Macros {
- return new(Macros)
-}
-
-// MergeFrom merges from specified reconciling
-func (t Macros) MergeFrom(from Macros, _type MergeType) Macros {
- t.Sections = t.Sections.MergeFrom(from.Sections, _type)
- return t
-}
-
-type MacrosSections struct {
- Users MacrosSection `json:"users,omitempty" yaml:"users,omitempty"`
- Profiles MacrosSection `json:"profiles,omitempty" yaml:"profiles,omitempty"`
- Quotas MacrosSection `json:"quotas,omitempty" yaml:"quotas,omitempty"`
- Settings MacrosSection `json:"settings,omitempty" yaml:"settings,omitempty"`
- Files MacrosSection `json:"files,omitempty" yaml:"files,omitempty"`
-}
-
-// MergeFrom merges from specified reconciling
-func (t MacrosSections) MergeFrom(from MacrosSections, _type MergeType) MacrosSections {
- t.Users = t.Users.MergeFrom(from.Users, _type)
- t.Profiles = t.Profiles.MergeFrom(from.Profiles, _type)
- t.Quotas = t.Quotas.MergeFrom(from.Quotas, _type)
- t.Settings = t.Settings.MergeFrom(from.Settings, _type)
- t.Files = t.Files.MergeFrom(from.Files, _type)
- return t
-}
-
-type MacrosSection struct {
- Enabled *types.StringBool `json:"enabled,omitempty" yaml:"enabled,omitempty"`
-}
-
-// MergeFrom merges from specified reconciling
-func (t MacrosSection) MergeFrom(from MacrosSection, _type MergeType) MacrosSection {
- t.Enabled = t.Enabled.MergeFrom(from.Enabled)
- return t
-}
-
-// NewReconciling creates new reconciling
-func NewReconciling() *Reconciling {
- return new(Reconciling)
-}
-
-// MergeFrom merges from specified reconciling
-func (t *Reconciling) MergeFrom(from *Reconciling, _type MergeType) *Reconciling {
- if from == nil {
- return t
- }
-
- if t == nil {
- t = NewReconciling()
- }
-
- switch _type {
- case MergeTypeFillEmptyValues:
- if t.Policy == "" {
- t.Policy = from.Policy
- }
- if t.ConfigMapPropagationTimeout == 0 {
- t.ConfigMapPropagationTimeout = from.ConfigMapPropagationTimeout
- }
- case MergeTypeOverrideByNonEmptyValues:
- if from.Policy != "" {
- // Override by non-empty values only
- t.Policy = from.Policy
- }
- if from.ConfigMapPropagationTimeout != 0 {
- // Override by non-empty values only
- t.ConfigMapPropagationTimeout = from.ConfigMapPropagationTimeout
- }
- }
-
- t.Cleanup = t.Cleanup.MergeFrom(from.Cleanup, _type)
- t.Runtime = t.Runtime.MergeFrom(from.Runtime, _type)
- t.Macros = t.Macros.MergeFrom(from.Macros, _type)
-
- return t
-}
-
-// SetDefaults set default values for reconciling
-func (t *Reconciling) SetDefaults() *Reconciling {
- if t == nil {
- return nil
- }
- t.Policy = ReconcilingPolicyUnspecified
- t.ConfigMapPropagationTimeout = 10
- t.Cleanup = NewCleanup().SetDefaults()
- return t
-}
-
-// GetPolicy gets policy
-func (t *Reconciling) GetPolicy() string {
- if t == nil {
- return ""
- }
- return t.Policy
-}
-
-// SetPolicy sets policy
-func (t *Reconciling) SetPolicy(p string) {
- if t == nil {
- return
- }
- t.Policy = p
-}
-
-func (t *Reconciling) HasConfigMapPropagationTimeout() bool {
- return t.GetConfigMapPropagationTimeout() > 0
-}
-
-// GetConfigMapPropagationTimeout gets config map propagation timeout
-func (t *Reconciling) GetConfigMapPropagationTimeout() int {
- if t == nil {
- return 0
- }
- return t.ConfigMapPropagationTimeout
-}
-
-// SetConfigMapPropagationTimeout sets config map propagation timeout
-func (t *Reconciling) SetConfigMapPropagationTimeout(timeout int) {
- if t == nil {
- return
- }
- t.ConfigMapPropagationTimeout = timeout
-}
-
-// GetConfigMapPropagationTimeoutDuration gets config map propagation timeout duration
-func (t *Reconciling) GetConfigMapPropagationTimeoutDuration() time.Duration {
- if t == nil {
- return 0
- }
- return time.Duration(t.GetConfigMapPropagationTimeout()) * time.Second
-}
-
-// Possible reconcile policy values
-const (
- ReconcilingPolicyUnspecified = "unspecified"
- ReconcilingPolicyWait = "wait"
- ReconcilingPolicyNoWait = "nowait"
-)
-
-// IsReconcilingPolicyWait checks whether reconcile policy is "wait"
-func (t *Reconciling) IsReconcilingPolicyWait() bool {
- return strings.ToLower(t.GetPolicy()) == ReconcilingPolicyWait
-}
-
-// IsReconcilingPolicyNoWait checks whether reconcile policy is "no wait"
-func (t *Reconciling) IsReconcilingPolicyNoWait() bool {
- return strings.ToLower(t.GetPolicy()) == ReconcilingPolicyNoWait
-}
-
-// GetCleanup gets cleanup
-func (t *Reconciling) GetCleanup() *Cleanup {
- if t == nil {
- return nil
- }
- return t.Cleanup
-}
-
-// GetCleanup gets cleanup
-func (t *Reconciling) SetCleanup(cleanup *Cleanup) {
- if t == nil {
- return
- }
- t.Cleanup = cleanup
-}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_setting.go b/pkg/apis/clickhouse.altinity.com/v1/type_setting.go
index 5364dc070..ea1740afe 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_setting.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_setting.go
@@ -291,6 +291,8 @@ func (s *Setting) FetchDataSourceAddress(defaultNamespace string, parseScalarStr
}
case SettingTypeSource:
// Fetch k8s address of the field from the source ref
+ // 1. The name of the secret to select from. Namespace is expected to be provided externally
+ // 2. The key of the secret to select from.
name, key := s.GetNameKey()
return types.ObjectAddress{
Namespace: defaultNamespace,
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_setting_data_source.go b/pkg/apis/clickhouse.altinity.com/v1/type_setting_data_source.go
index 466d77a2f..f9f690708 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_setting_data_source.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_setting_data_source.go
@@ -45,6 +45,8 @@ func NewSettingSourceFromAny(untyped any) (*Setting, bool) {
}
// GetNameKey gets name and key from the secret ref
+// 1. The name of the secret to select from. Namespace is expected to be provided externally
+// 2. The key of the secret to select from.
func (s *SettingSource) GetNameKey() (string, string) {
if ref := s.GetSecretKeyRef(); ref != nil {
return ref.Name, ref.Key
@@ -94,6 +96,8 @@ func (s *Setting) IsSource() bool {
}
// GetNameKey gets name and key of source setting
+// 1. The name of the secret to select from. Namespace is expected to be provided externally
+// 2. The key of the secret to select from.
func (s *Setting) GetNameKey() (string, string) {
if ref := s.GetSecretKeyRef(); ref != nil {
return ref.Name, ref.Key
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_spec.go b/pkg/apis/clickhouse.altinity.com/v1/type_spec.go
index f8f780e4e..650a73849 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_spec.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_spec.go
@@ -27,7 +27,8 @@ type ChiSpec struct {
Suspend *types.StringBool `json:"suspend,omitempty" yaml:"suspend,omitempty"`
NamespaceDomainPattern *types.String `json:"namespaceDomainPattern,omitempty" yaml:"namespaceDomainPattern,omitempty"`
Templating *ChiTemplating `json:"templating,omitempty" yaml:"templating,omitempty"`
- Reconciling *Reconciling `json:"reconciling,omitempty" yaml:"reconciling,omitempty"`
+ Reconciling *ChiReconcile `json:"reconciling,omitempty" yaml:"reconciling,omitempty"`
+ Reconcile *ChiReconcile `json:"reconcile,omitempty" yaml:"reconcile,omitempty"`
Defaults *Defaults `json:"defaults,omitempty" yaml:"defaults,omitempty"`
Configuration *Configuration `json:"configuration,omitempty" yaml:"configuration,omitempty"`
Templates *Templates `json:"templates,omitempty" yaml:"templates,omitempty"`
@@ -162,7 +163,7 @@ func (spec *ChiSpec) MergeFrom(from *ChiSpec, _type MergeType) {
}
spec.Templating = spec.Templating.MergeFrom(from.Templating, _type)
- spec.Reconciling = spec.Reconciling.MergeFrom(from.Reconciling, _type)
+ spec.Reconcile = spec.Reconcile.MergeFrom(from.Reconcile, _type)
spec.Defaults = spec.Defaults.MergeFrom(from.Defaults, _type)
spec.Configuration = spec.Configuration.MergeFrom(from.Configuration, _type)
spec.Templates = spec.Templates.MergeFrom(from.Templates, _type)
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_zookeeper.go b/pkg/apis/clickhouse.altinity.com/v1/type_zookeeper.go
index 7a3e5f8c2..91c6da080 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_zookeeper.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_zookeeper.go
@@ -15,19 +15,23 @@
package v1
import (
- "gopkg.in/d4l3k/messagediff.v1"
"strings"
+
+ "gopkg.in/d4l3k/messagediff.v1"
+
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
)
// ZookeeperConfig defines zookeeper section of .spec.configuration
// Refers to
-// https://clickhouse.yandex/docs/en/single/index.html?#server-settings_zookeeper
+// https://clickhouse.com/docs/operations/server-configuration-parameters/settings#zookeeper
type ZookeeperConfig struct {
- Nodes ZookeeperNodes `json:"nodes,omitempty" yaml:"nodes,omitempty"`
- SessionTimeoutMs int `json:"session_timeout_ms,omitempty" yaml:"session_timeout_ms,omitempty"`
- OperationTimeoutMs int `json:"operation_timeout_ms,omitempty" yaml:"operation_timeout_ms,omitempty"`
- Root string `json:"root,omitempty" yaml:"root,omitempty"`
- Identity string `json:"identity,omitempty" yaml:"identity,omitempty"`
+ Nodes ZookeeperNodes `json:"nodes,omitempty" yaml:"nodes,omitempty"`
+ SessionTimeoutMs int `json:"session_timeout_ms,omitempty" yaml:"session_timeout_ms,omitempty"`
+ OperationTimeoutMs int `json:"operation_timeout_ms,omitempty" yaml:"operation_timeout_ms,omitempty"`
+ Root string `json:"root,omitempty" yaml:"root,omitempty"`
+ Identity string `json:"identity,omitempty" yaml:"identity,omitempty"`
+ UseCompression *types.StringBool `json:"use_compression,omitempty" yaml:"use_compression,omitempty"`
}
type ZookeeperNodes []ZookeeperNode
@@ -114,6 +118,7 @@ func (zkc *ZookeeperConfig) MergeFrom(from *ZookeeperConfig, _type MergeType) *Z
if from.Identity != "" {
zkc.Identity = from.Identity
}
+ zkc.UseCompression = zkc.UseCompression.MergeFrom(from.UseCompression)
return zkc
}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go b/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go
index ec7fdeef7..af6f349a9 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go
@@ -182,6 +182,30 @@ func (in *ChiClusterRuntime) DeepCopy() *ChiClusterRuntime {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChiReconcile) DeepCopyInto(out *ChiReconcile) {
+ *out = *in
+ if in.Cleanup != nil {
+ in, out := &in.Cleanup, &out.Cleanup
+ *out = new(Cleanup)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Macros.DeepCopyInto(&out.Macros)
+ out.Runtime = in.Runtime
+ in.Host.DeepCopyInto(&out.Host)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiReconcile.
+func (in *ChiReconcile) DeepCopy() *ChiReconcile {
+ if in == nil {
+ return nil
+ }
+ out := new(ChiReconcile)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChiReplica) DeepCopyInto(out *ChiReplica) {
*out = *in
@@ -394,7 +418,12 @@ func (in *ChiSpec) DeepCopyInto(out *ChiSpec) {
}
if in.Reconciling != nil {
in, out := &in.Reconciling, &out.Reconciling
- *out = new(Reconciling)
+ *out = new(ChiReconcile)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Reconcile != nil {
+ in, out := &in.Reconcile, &out.Reconcile
+ *out = new(ChiReconcile)
(*in).DeepCopyInto(*out)
}
if in.Defaults != nil {
@@ -772,7 +801,7 @@ func (in *Cluster) DeepCopyInto(out *Cluster) {
*out = new(types.Int32)
**out = **in
}
- out.Reconcile = in.Reconcile
+ in.Reconcile.DeepCopyInto(&out.Reconcile)
if in.Layout != nil {
in, out := &in.Layout, &out.Layout
*out = new(ChiClusterLayout)
@@ -796,6 +825,7 @@ func (in *Cluster) DeepCopy() *Cluster {
func (in *ClusterReconcile) DeepCopyInto(out *ClusterReconcile) {
*out = *in
out.Runtime = in.Runtime
+ in.Host.DeepCopyInto(&out.Host)
return
}
@@ -1307,23 +1337,6 @@ func (in *HostsField) DeepCopy() *HostsField {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Macros) DeepCopyInto(out *Macros) {
- *out = *in
- in.Sections.DeepCopyInto(&out.Sections)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Macros.
-func (in *Macros) DeepCopy() *Macros {
- if in == nil {
- return nil
- }
- out := new(Macros)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MacrosSection) DeepCopyInto(out *MacrosSection) {
*out = *in
@@ -1797,85 +1810,17 @@ func (in *OperatorConfigReconcile) DeepCopy() *OperatorConfigReconcile {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperatorConfigReconcileHost) DeepCopyInto(out *OperatorConfigReconcileHost) {
+func (in *OperatorConfigReconcileRuntime) DeepCopyInto(out *OperatorConfigReconcileRuntime) {
*out = *in
- in.Wait.DeepCopyInto(&out.Wait)
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConfigReconcileHost.
-func (in *OperatorConfigReconcileHost) DeepCopy() *OperatorConfigReconcileHost {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConfigReconcileRuntime.
+func (in *OperatorConfigReconcileRuntime) DeepCopy() *OperatorConfigReconcileRuntime {
if in == nil {
return nil
}
- out := new(OperatorConfigReconcileHost)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperatorConfigReconcileHostWait) DeepCopyInto(out *OperatorConfigReconcileHostWait) {
- *out = *in
- if in.Exclude != nil {
- in, out := &in.Exclude, &out.Exclude
- *out = new(types.StringBool)
- **out = **in
- }
- if in.Queries != nil {
- in, out := &in.Queries, &out.Queries
- *out = new(types.StringBool)
- **out = **in
- }
- if in.Include != nil {
- in, out := &in.Include, &out.Include
- *out = new(types.StringBool)
- **out = **in
- }
- if in.Replicas != nil {
- in, out := &in.Replicas, &out.Replicas
- *out = new(OperatorConfigReconcileHostWaitReplicas)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConfigReconcileHostWait.
-func (in *OperatorConfigReconcileHostWait) DeepCopy() *OperatorConfigReconcileHostWait {
- if in == nil {
- return nil
- }
- out := new(OperatorConfigReconcileHostWait)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperatorConfigReconcileHostWaitReplicas) DeepCopyInto(out *OperatorConfigReconcileHostWaitReplicas) {
- *out = *in
- if in.All != nil {
- in, out := &in.All, &out.All
- *out = new(types.StringBool)
- **out = **in
- }
- if in.New != nil {
- in, out := &in.New, &out.New
- *out = new(types.StringBool)
- **out = **in
- }
- if in.Delay != nil {
- in, out := &in.Delay, &out.Delay
- *out = new(types.Int32)
- **out = **in
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConfigReconcileHostWaitReplicas.
-func (in *OperatorConfigReconcileHostWaitReplicas) DeepCopy() *OperatorConfigReconcileHostWaitReplicas {
- if in == nil {
- return nil
- }
- out := new(OperatorConfigReconcileHostWaitReplicas)
+ out := new(OperatorConfigReconcileRuntime)
in.DeepCopyInto(out)
return out
}
@@ -2065,11 +2010,7 @@ func (in *OperatorConfigUser) DeepCopy() *OperatorConfigUser {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OperatorConfigWatch) DeepCopyInto(out *OperatorConfigWatch) {
*out = *in
- if in.Namespaces != nil {
- in, out := &in.Namespaces, &out.Namespaces
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
+ in.Namespaces.DeepCopyInto(&out.Namespaces)
return
}
@@ -2083,6 +2024,40 @@ func (in *OperatorConfigWatch) DeepCopy() *OperatorConfigWatch {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorConfigWatchNamespaces) DeepCopyInto(out *OperatorConfigWatchNamespaces) {
+ *out = *in
+ if in.Include != nil {
+ in, out := &in.Include, &out.Include
+ *out = new(types.Strings)
+ if **in != nil {
+ in, out := *in, *out
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ }
+ if in.Exclude != nil {
+ in, out := &in.Exclude, &out.Exclude
+ *out = new(types.Strings)
+ if **in != nil {
+ in, out := *in, *out
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConfigWatchNamespaces.
+func (in *OperatorConfigWatchNamespaces) DeepCopy() *OperatorConfigWatchNamespaces {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorConfigWatchNamespaces)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodDistribution) DeepCopyInto(out *PodDistribution) {
*out = *in
@@ -2192,40 +2167,149 @@ func (in *PortDistribution) DeepCopy() *PortDistribution {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ReconcileRuntime) DeepCopyInto(out *ReconcileRuntime) {
+func (in *ReconcileHost) DeepCopyInto(out *ReconcileHost) {
*out = *in
+ in.Wait.DeepCopyInto(&out.Wait)
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReconcileRuntime.
-func (in *ReconcileRuntime) DeepCopy() *ReconcileRuntime {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReconcileHost.
+func (in *ReconcileHost) DeepCopy() *ReconcileHost {
if in == nil {
return nil
}
- out := new(ReconcileRuntime)
+ out := new(ReconcileHost)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Reconciling) DeepCopyInto(out *Reconciling) {
+func (in *ReconcileHostWait) DeepCopyInto(out *ReconcileHostWait) {
*out = *in
- if in.Cleanup != nil {
- in, out := &in.Cleanup, &out.Cleanup
- *out = new(Cleanup)
+ if in.Exclude != nil {
+ in, out := &in.Exclude, &out.Exclude
+ *out = new(types.StringBool)
+ **out = **in
+ }
+ if in.Queries != nil {
+ in, out := &in.Queries, &out.Queries
+ *out = new(types.StringBool)
+ **out = **in
+ }
+ if in.Include != nil {
+ in, out := &in.Include, &out.Include
+ *out = new(types.StringBool)
+ **out = **in
+ }
+ if in.Replicas != nil {
+ in, out := &in.Replicas, &out.Replicas
+ *out = new(ReconcileHostWaitReplicas)
(*in).DeepCopyInto(*out)
}
- out.Runtime = in.Runtime
- in.Macros.DeepCopyInto(&out.Macros)
+ if in.Probes != nil {
+ in, out := &in.Probes, &out.Probes
+ *out = new(ReconcileHostWaitProbes)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReconcileHostWait.
+func (in *ReconcileHostWait) DeepCopy() *ReconcileHostWait {
+ if in == nil {
+ return nil
+ }
+ out := new(ReconcileHostWait)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReconcileHostWaitProbes) DeepCopyInto(out *ReconcileHostWaitProbes) {
+ *out = *in
+ if in.Startup != nil {
+ in, out := &in.Startup, &out.Startup
+ *out = new(types.StringBool)
+ **out = **in
+ }
+ if in.Readiness != nil {
+ in, out := &in.Readiness, &out.Readiness
+ *out = new(types.StringBool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReconcileHostWaitProbes.
+func (in *ReconcileHostWaitProbes) DeepCopy() *ReconcileHostWaitProbes {
+ if in == nil {
+ return nil
+ }
+ out := new(ReconcileHostWaitProbes)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReconcileHostWaitReplicas) DeepCopyInto(out *ReconcileHostWaitReplicas) {
+ *out = *in
+ if in.All != nil {
+ in, out := &in.All, &out.All
+ *out = new(types.StringBool)
+ **out = **in
+ }
+ if in.New != nil {
+ in, out := &in.New, &out.New
+ *out = new(types.StringBool)
+ **out = **in
+ }
+ if in.Delay != nil {
+ in, out := &in.Delay, &out.Delay
+ *out = new(types.Int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReconcileHostWaitReplicas.
+func (in *ReconcileHostWaitReplicas) DeepCopy() *ReconcileHostWaitReplicas {
+ if in == nil {
+ return nil
+ }
+ out := new(ReconcileHostWaitReplicas)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReconcileMacros) DeepCopyInto(out *ReconcileMacros) {
+ *out = *in
+ in.Sections.DeepCopyInto(&out.Sections)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReconcileMacros.
+func (in *ReconcileMacros) DeepCopy() *ReconcileMacros {
+ if in == nil {
+ return nil
+ }
+ out := new(ReconcileMacros)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReconcileRuntime) DeepCopyInto(out *ReconcileRuntime) {
+ *out = *in
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Reconciling.
-func (in *Reconciling) DeepCopy() *Reconciling {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReconcileRuntime.
+func (in *ReconcileRuntime) DeepCopy() *ReconcileRuntime {
if in == nil {
return nil
}
- out := new(Reconciling)
+ out := new(ReconcileRuntime)
in.DeepCopyInto(out)
return out
}
@@ -2745,6 +2829,11 @@ func (in *ZookeeperConfig) DeepCopyInto(out *ZookeeperConfig) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.UseCompression != nil {
+ in, out := &in.UseCompression, &out.UseCompression
+ *out = new(types.StringBool)
+ **out = **in
+ }
return
}
diff --git a/pkg/apis/common/types/strings.go b/pkg/apis/common/types/strings.go
new file mode 100644
index 000000000..0a006d00b
--- /dev/null
+++ b/pkg/apis/common/types/strings.go
@@ -0,0 +1,123 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "github.com/altinity/clickhouse-operator/pkg/util"
+ "strings"
+)
+
+// Strings defines an optional (nil-able) set of strings
+type Strings []string
+
+// NewStrings creates a new Strings from the provided slice, with duplicates removed
+func NewStrings(str []string) *Strings {
+ s := new(Strings)
+ *s = append(*s, str...)
+ *s = util.Unique(*s)
+ return s
+}
+
+// From casts a slice of strings into Strings
+func (s *Strings) From(value []string) *Strings {
+ return NewStrings(value)
+}
+
+// HasValue checks whether value is specified
+func (s *Strings) HasValue() bool {
+ return s.IsValid() && (s.Len() > 0)
+}
+
+func (s *Strings) Has(needle string) bool {
+ if !s.HasValue() {
+ return false
+ }
+ return util.InArray(needle, s.Value())
+}
+
+func (s *Strings) Match(needle string) bool {
+ if !s.HasValue() {
+ return false
+ }
+ return util.MatchArrayOfRegexps(needle, s.Value())
+}
+
+// Value returns the underlying slice of strings, or nil when unspecified
+func (s *Strings) Value() []string {
+ if s.IsValid() {
+ return *s
+ }
+ return nil
+}
+
+func (s *Strings) First() string {
+ if s.HasValue() {
+ return (*s)[0]
+ }
+ return ""
+}
+
+// String joins the values into a single space-separated string
+func (s *Strings) String() string {
+ return strings.Join(s.Value(), " ")
+}
+
+// IsValid checks whether var has a proper value
+func (s *Strings) IsValid() bool {
+ return s != nil
+}
+
+// Len returns the number of strings in the set
+func (s *Strings) Len() int {
+ if s.IsValid() {
+ return len(*s)
+ }
+ return 0
+}
+
+// Normalize normalizes value with fallback to defaultValue in case initial value is incorrect
+func (s *Strings) Normalize(defaultValue []string) *Strings {
+ if s.IsValid() {
+ return s
+ }
+
+ // Value is unrecognized, return default value
+ return NewStrings(defaultValue)
+}
+
+// MergeFrom merges value from another variable
+func (s *Strings) MergeFrom(from *Strings) *Strings {
+ if from == nil {
+ // Nothing to merge from, keep original value
+ return s
+ }
+
+ // From now on we have `from` specified
+
+ if s == nil {
+ // Recipient is not specified, just use `from` value
+ return from
+ }
+
+ // Both recipient and `from` are specified, need to merge
+
+ *s = append(*s, from.Value()...)
+
+ return NewStrings(s.Value())
+}
+
+func (s *Strings) Append(str string) *Strings {
+ return s.MergeFrom(NewStrings([]string{str}))
+}
diff --git a/pkg/apis/deployment/env_vars.go b/pkg/apis/deployment/env_vars.go
index 865cc3b4a..b85878d81 100644
--- a/pkg/apis/deployment/env_vars.go
+++ b/pkg/apis/deployment/env_vars.go
@@ -48,6 +48,8 @@ const (
WATCH_NAMESPACE = "WATCH_NAMESPACE"
// WATCH_NAMESPACES and WATCH_NAMESPACE specifies what namespaces to watch
WATCH_NAMESPACES = "WATCH_NAMESPACES"
+ // WATCH_NAMESPACES_EXCLUDE specifies namespaces that should be excluded from reconciliation
+ WATCH_NAMESPACES_EXCLUDE = "WATCH_NAMESPACES_EXCLUDE"
// CHOP_CONFIG path to clickhouse operator configuration file
CHOP_CONFIG = "CHOP_CONFIG"
diff --git a/pkg/chop/accessors.go b/pkg/chop/accessors.go
index ed288ee20..2241725b9 100644
--- a/pkg/chop/accessors.go
+++ b/pkg/chop/accessors.go
@@ -31,7 +31,7 @@ func New(kubeClient *kube.Clientset, chopClient *chopclientset.Clientset, initCH
// Create operator instance
chop = newCHOp(version.Version, version.GitSHA, version.BuiltAt, kubeClient, chopClient, initCHOpConfigFilePath)
if err := chop.Init(); err != nil {
- log.F().Fatal("Unable to init CHOP instance %v", err)
+ log.F().Fatal("Unable to init CHOP instance. Err: %v", err)
os.Exit(1)
}
chop.SetupLog()
diff --git a/pkg/chop/config_manager.go b/pkg/chop/config_manager.go
index 9dd3dc807..8e0ed2db2 100644
--- a/pkg/chop/config_manager.go
+++ b/pkg/chop/config_manager.go
@@ -340,6 +340,7 @@ func (cm *ConfigManager) listSupportedEnvVarNames() []string {
deployment.WATCH_NAMESPACE,
deployment.WATCH_NAMESPACES,
+ deployment.WATCH_NAMESPACES_EXCLUDE,
}
}
diff --git a/pkg/controller/chi/cmd_queue/type_cmd_queue.go b/pkg/controller/chi/cmd_queue/type_cmd_queue.go
index 135247ead..dd0efef2f 100644
--- a/pkg/controller/chi/cmd_queue/type_cmd_queue.go
+++ b/pkg/controller/chi/cmd_queue/type_cmd_queue.go
@@ -15,8 +15,10 @@
package cmd_queue
import (
- "github.com/altinity/queue"
core "k8s.io/api/core/v1"
+ discovery "k8s.io/api/discovery/v1"
+
+ "github.com/altinity/queue"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
)
@@ -38,10 +40,11 @@ func (i PriorityQueueItem) Priority() int {
}
const (
- priorityReconcileCHI int = 10
- priorityReconcileCHIT int = 5
- priorityReconcileChopConfig int = 3
- priorityReconcileEndpoints int = 15
+ priorityReconcileCHI int = 10
+ priorityReconcileCHIT int = 5
+ priorityReconcileChopConfig int = 3
+ priorityReconcileEndpoints int = 15
+ priorityReconcileEndpointSlice int = 15
)
// ReconcileCHI specifies reconcile request queue item
@@ -194,6 +197,39 @@ func NewReconcileEndpoints(cmd string, old, new *core.Endpoints) *ReconcileEndpo
}
}
+// ReconcileEndpointSlice specifies endpointSlice reconcile request queue item
+type ReconcileEndpointSlice struct {
+ PriorityQueueItem
+ Cmd string
+ Old *discovery.EndpointSlice
+ New *discovery.EndpointSlice
+}
+
+var _ queue.PriorityQueueItem = &ReconcileEndpointSlice{}
+
+// Handle returns handle of the queue item
+func (r ReconcileEndpointSlice) Handle() queue.T {
+ if r.New != nil {
+ return "ReconcileEndpointSlice" + ":" + r.New.Namespace + "/" + r.New.Name
+ }
+ if r.Old != nil {
+ return "ReconcileEndpointSlice" + ":" + r.Old.Namespace + "/" + r.Old.Name
+ }
+ return ""
+}
+
+// NewReconcileEndpointSlice creates new reconcile endpointSlice queue item
+func NewReconcileEndpointSlice(cmd string, old, new *discovery.EndpointSlice) *ReconcileEndpointSlice {
+ return &ReconcileEndpointSlice{
+ PriorityQueueItem: PriorityQueueItem{
+ priority: priorityReconcileEndpointSlice,
+ },
+ Cmd: cmd,
+ Old: old,
+ New: new,
+ }
+}
+
// ReconcilePod specifies pod reconcile
type ReconcilePod struct {
PriorityQueueItem
@@ -202,7 +238,7 @@ type ReconcilePod struct {
New *core.Pod
}
-var _ queue.PriorityQueueItem = &ReconcileEndpoints{}
+var _ queue.PriorityQueueItem = &ReconcilePod{}
// Handle returns handle of the queue item
func (r ReconcilePod) Handle() queue.T {
diff --git a/pkg/controller/chi/controller-getter.go b/pkg/controller/chi/controller-getter.go
index 3308d6163..048a19011 100644
--- a/pkg/controller/chi/controller-getter.go
+++ b/pkg/controller/chi/controller-getter.go
@@ -15,6 +15,7 @@
package chi
import (
+ "context"
"fmt"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -27,13 +28,13 @@ import (
)
// getPodsIPs gets all pod IPs
-func (c *Controller) getPodsIPs(obj interface{}) (ips []string) {
+func (c *Controller) getPodsIPs(ctx context.Context, obj interface{}) (ips []string) {
l := log.V(3).M(obj).F()
l.S().Info("looking for pods IPs")
defer l.E().Info("looking for pods IPs")
- for _, pod := range c.kube.Pod().GetAll(obj) {
+ for _, pod := range c.kube.Pod().GetAll(ctx, obj) {
if ip := pod.Status.PodIP; ip == "" {
l.Warning("Pod NO IP address found. Pod: %s", util.NamespacedName(pod))
} else {
diff --git a/pkg/controller/chi/controller-podder.go b/pkg/controller/chi/controller-podder.go
deleted file mode 100644
index 7f1a94f46..000000000
--- a/pkg/controller/chi/controller-podder.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package chi
-
-import (
- "k8s.io/api/core/v1"
-
- log "github.com/altinity/clickhouse-operator/pkg/announcer"
- api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
-)
-
-// walkContainers walks with specified func over all containers of the specified host
-func (c *Controller) walkContainers(host *api.Host, f func(container *v1.Container)) {
- pod, err := c.kube.Pod().Get(host)
- if err != nil {
- log.M(host).F().Error("FAIL get pod for host '%s' err: %v", host.Runtime.Address.NamespaceNameString(), err)
- return
- }
-
- for i := range pod.Spec.Containers {
- container := &pod.Spec.Containers[i]
- f(container)
- }
-}
-
-// walkContainerStatuses walks with specified func over all statuses of the specified host
-func (c *Controller) walkContainerStatuses(host *api.Host, f func(status *v1.ContainerStatus)) {
- pod, err := c.kube.Pod().Get(host)
- if err != nil {
- log.M(host).F().Error("FAIL get pod for host %s err:%v", host.Runtime.Address.NamespaceNameString(), err)
- return
- }
-
- for i := range pod.Status.ContainerStatuses {
- status := &pod.Status.ContainerStatuses[i]
- f(status)
- }
-}
-
-// isHostRunning checks whether ALL containers of the specified host are running
-func (c *Controller) isHostRunning(host *api.Host) bool {
- all := true
- c.walkContainerStatuses(host, func(status *v1.ContainerStatus) {
- if status.State.Running == nil {
- all = false
- }
- })
- return all
-}
diff --git a/pkg/controller/chi/controller.go b/pkg/controller/chi/controller.go
index 67a61fd75..07ff71f4b 100644
--- a/pkg/controller/chi/controller.go
+++ b/pkg/controller/chi/controller.go
@@ -18,6 +18,8 @@ import (
"context"
"encoding/json"
"fmt"
+ "sort"
+ "strings"
"time"
"github.com/sanity-io/litter"
@@ -25,6 +27,7 @@ import (
apps "k8s.io/api/apps/v1"
core "k8s.io/api/core/v1"
+ discovery "k8s.io/api/discovery/v1"
apiExtensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeTypes "k8s.io/apimachinery/pkg/types"
@@ -170,7 +173,7 @@ func (c *Controller) addEventHandlersCHI(
},
DeleteFunc: func(obj interface{}) {
chi := obj.(*api.ClickHouseInstallation)
- if !chop.Config().IsWatchedNamespace(chi.Namespace) {
+ if !chop.Config().IsNamespaceWatched(chi.Namespace) {
return
}
log.V(3).M(chi).Info("chiInformer.DeleteFunc")
@@ -185,7 +188,8 @@ func (c *Controller) addEventHandlersCHIT(
chopInformerFactory.Clickhouse().V1().ClickHouseInstallationTemplates().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
chit := obj.(*api.ClickHouseInstallationTemplate)
- if !chop.Config().IsWatchedNamespace(chit.Namespace) {
+ if !chop.Config().IsNamespaceWatched(chit.Namespace) {
+ log.V(2).M(chit).Info("chitInformer: skip event, namespace '%s' is not watched or is in deny list", chit.Namespace)
return
}
log.V(3).M(chit).Info("chitInformer.AddFunc")
@@ -194,7 +198,7 @@ func (c *Controller) addEventHandlersCHIT(
UpdateFunc: func(old, new interface{}) {
oldChit := old.(*api.ClickHouseInstallationTemplate)
newChit := new.(*api.ClickHouseInstallationTemplate)
- if !chop.Config().IsWatchedNamespace(newChit.Namespace) {
+ if !chop.Config().IsNamespaceWatched(newChit.Namespace) {
return
}
log.V(3).M(newChit).Info("chitInformer.UpdateFunc")
@@ -202,7 +206,7 @@ func (c *Controller) addEventHandlersCHIT(
},
DeleteFunc: func(obj interface{}) {
chit := obj.(*api.ClickHouseInstallationTemplate)
- if !chop.Config().IsWatchedNamespace(chit.Namespace) {
+ if !chop.Config().IsNamespaceWatched(chit.Namespace) {
return
}
log.V(3).M(chit).Info("chitInformer.DeleteFunc")
@@ -217,7 +221,8 @@ func (c *Controller) addEventHandlersChopConfig(
chopInformerFactory.Clickhouse().V1().ClickHouseOperatorConfigurations().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
chopConfig := obj.(*api.ClickHouseOperatorConfiguration)
- if !chop.Config().IsWatchedNamespace(chopConfig.Namespace) {
+ if !chop.Config().IsNamespaceWatched(chopConfig.Namespace) {
+ log.V(2).M(chopConfig).Info("chopInformer: skip event, namespace '%s' is not watched or is in deny list", chopConfig.Namespace)
return
}
log.V(3).M(chopConfig).Info("chopInformer.AddFunc")
@@ -226,7 +231,7 @@ func (c *Controller) addEventHandlersChopConfig(
UpdateFunc: func(old, new interface{}) {
newChopConfig := new.(*api.ClickHouseOperatorConfiguration)
oldChopConfig := old.(*api.ClickHouseOperatorConfiguration)
- if !chop.Config().IsWatchedNamespace(newChopConfig.Namespace) {
+ if !chop.Config().IsNamespaceWatched(newChopConfig.Namespace) {
return
}
log.V(3).M(newChopConfig).Info("chopInformer.UpdateFunc")
@@ -234,7 +239,7 @@ func (c *Controller) addEventHandlersChopConfig(
},
DeleteFunc: func(obj interface{}) {
chopConfig := obj.(*api.ClickHouseOperatorConfiguration)
- if !chop.Config().IsWatchedNamespace(chopConfig.Namespace) {
+ if !chop.Config().IsNamespaceWatched(chopConfig.Namespace) {
return
}
log.V(3).M(chopConfig).Info("chopInformer.DeleteFunc")
@@ -306,10 +311,12 @@ func checkIP(path *messagediff.Path, iValue interface{}) bool {
return false
}
-func updated(old, new *core.Endpoints) bool {
+func isUpdatedEndpoints(old, new *core.Endpoints) bool {
oldSubsets := normalizeEndpoints(old).Subsets
newSubsets := normalizeEndpoints(new).Subsets
+ log.V(1).M(new).F().Info("Check whether is updated Endpoints. %s/%s", new.Namespace, new.Name)
+
diff, equal := messagediff.DeepDiff(oldSubsets[0].Addresses, newSubsets[0].Addresses)
if equal {
log.V(3).M(old).Info("endpointsInformer.UpdateFunc: no changes found")
@@ -342,7 +349,32 @@ func updated(old, new *core.Endpoints) bool {
return false
}
-func (c *Controller) addEventHandlersEndpoint(
+func isUpdatedEndpointSlice(old, new *discovery.EndpointSlice) bool {
+ log.V(1).M(new).F().Info("Check whether is updated EndpointSlice. %s/%s Transition: '%s'=>'%s'", new.Namespace, new.Name, buildComparableEndpointAddresses(old), buildComparableEndpointAddresses(new))
+ return buildComparableEndpointAddresses(old) != buildComparableEndpointAddresses(new)
+}
+
+func buildComparableEndpointAddresses(epSlice *discovery.EndpointSlice) string {
+ return strings.Join(fetchUniqueReadyAddresses(epSlice), ",")
+}
+
+func fetchUniqueReadyAddresses(epSlice *discovery.EndpointSlice) (res []string) {
+ if epSlice == nil {
+ return nil
+ }
+ for _, ep := range epSlice.Endpoints {
+ if (ep.Conditions.Ready != nil) && (*ep.Conditions.Ready == false) {
+ // Skip not-ready address
+ continue
+ }
+ res = append(res, ep.Addresses...)
+ }
+ sort.Strings(res)
+
+ return util.Unique(res)
+}
+
+func (c *Controller) addEventHandlersEndpoints(
kubeInformerFactory kubeInformers.SharedInformerFactory,
) {
kubeInformerFactory.Core().V1().Endpoints().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -360,7 +392,7 @@ func (c *Controller) addEventHandlersEndpoint(
return
}
log.V(3).M(newEndpoints).Info("endpointsInformer.UpdateFunc")
- if updated(oldEndpoints, newEndpoints) {
+ if isUpdatedEndpoints(oldEndpoints, newEndpoints) {
c.enqueueObject(cmd_queue.NewReconcileEndpoints(cmd_queue.ReconcileUpdate, oldEndpoints, newEndpoints))
}
},
@@ -374,6 +406,38 @@ func (c *Controller) addEventHandlersEndpoint(
})
}
+func (c *Controller) addEventHandlersEndpointSlice(
+ kubeInformerFactory kubeInformers.SharedInformerFactory,
+) {
+ kubeInformerFactory.Discovery().V1().EndpointSlices().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+ AddFunc: func(obj interface{}) {
+ endpointSlice := obj.(*discovery.EndpointSlice)
+ if !c.isTrackedObject(&endpointSlice.ObjectMeta) {
+ return
+ }
+ log.V(3).M(endpointSlice).Info("endpointSliceInformer.AddFunc")
+ },
+ UpdateFunc: func(old, new interface{}) {
+ oldEndpointSlice := old.(*discovery.EndpointSlice)
+ newEndpointSlice := new.(*discovery.EndpointSlice)
+ if !c.isTrackedObject(&oldEndpointSlice.ObjectMeta) {
+ return
+ }
+ log.V(3).M(newEndpointSlice).Info("endpointSliceInformer.UpdateFunc")
+ if isUpdatedEndpointSlice(oldEndpointSlice, newEndpointSlice) {
+ c.enqueueObject(cmd_queue.NewReconcileEndpointSlice(cmd_queue.ReconcileUpdate, oldEndpointSlice, newEndpointSlice))
+ }
+ },
+ DeleteFunc: func(obj interface{}) {
+ endpointSlice := obj.(*discovery.EndpointSlice)
+ if !c.isTrackedObject(&endpointSlice.ObjectMeta) {
+ return
+ }
+ log.V(3).M(endpointSlice).Info("endpointSliceInformer.DeleteFunc")
+ },
+ })
+}
+
func (c *Controller) addEventHandlersConfigMap(
kubeInformerFactory kubeInformers.SharedInformerFactory,
) {
@@ -473,7 +537,8 @@ func (c *Controller) addEventHandlers(
c.addEventHandlersCHIT(chopInformerFactory)
c.addEventHandlersChopConfig(chopInformerFactory)
c.addEventHandlersService(kubeInformerFactory)
- c.addEventHandlersEndpoint(kubeInformerFactory)
+ //c.addEventHandlersEndpoints(kubeInformerFactory)
+ c.addEventHandlersEndpointSlice(kubeInformerFactory)
c.addEventHandlersConfigMap(kubeInformerFactory)
c.addEventHandlersStatefulSet(kubeInformerFactory)
c.addEventHandlersPod(kubeInformerFactory)
@@ -481,7 +546,7 @@ func (c *Controller) addEventHandlers(
// isTrackedObject checks whether operator is interested in changes of this object
func (c *Controller) isTrackedObject(meta meta.Object) bool {
- return chop.Config().IsWatchedNamespace(meta.GetNamespace()) && chiLabeler.New(nil).IsCHOPGeneratedObject(meta)
+ return chop.Config().IsNamespaceWatched(meta.GetNamespace()) && chiLabeler.New(nil).IsCHOPGeneratedObject(meta)
}
// Run syncs caches, starts workers
@@ -606,6 +671,7 @@ func (c *Controller) enqueueObject(obj queue.PriorityQueueItem) {
*cmd_queue.ReconcileCHIT,
*cmd_queue.ReconcileChopConfig,
*cmd_queue.ReconcileEndpoints,
+ *cmd_queue.ReconcileEndpointSlice,
*cmd_queue.ReconcilePod:
variants := api.DefaultReconcileSystemThreadsNumber
index = util.HashIntoIntTopped(handle, variants)
@@ -856,7 +922,8 @@ func (c *Controller) handleObject(obj interface{}) {
}
func shouldEnqueue(chi *api.ClickHouseInstallation) bool {
- if !chop.Config().IsWatchedNamespace(chi.Namespace) {
+ if !chop.Config().IsNamespaceWatched(chi.Namespace) {
+ log.V(2).M(chi).Info("chiInformer: skip enqueue, namespace '%s' is not watched or is in deny list", chi.Namespace)
return false
}
diff --git a/pkg/controller/chi/kube/config-map.go b/pkg/controller/chi/kube/config-map.go
index 22a758be2..2e104472c 100644
--- a/pkg/controller/chi/kube/config-map.go
+++ b/pkg/controller/chi/kube/config-map.go
@@ -24,7 +24,6 @@ import (
kube "k8s.io/client-go/kubernetes"
log "github.com/altinity/clickhouse-operator/pkg/announcer"
- "github.com/altinity/clickhouse-operator/pkg/chop"
"github.com/altinity/clickhouse-operator/pkg/controller"
"github.com/altinity/clickhouse-operator/pkg/controller/common/poller"
)
@@ -62,7 +61,7 @@ func (c *ConfigMap) Remove(ctx context.Context, namespace, name string) error {
func (c *ConfigMap) Delete(ctx context.Context, namespace, name string) error {
item := "ConfigMap"
return poller.New(ctx, fmt.Sprintf("delete %s: %s/%s", item, namespace, name)).
- WithOptions(poller.NewOptions().FromConfig(chop.Config())).
+ WithOptions(poller.NewOptionsFromConfig()).
WithFunctions(&poller.Functions{
IsDone: func(_ctx context.Context, _ any) bool {
if err := c.Remove(ctx, namespace, name); err != nil {
diff --git a/pkg/controller/chi/kube/pdb.go b/pkg/controller/chi/kube/pdb.go
index 8ee501231..8acef5a8e 100644
--- a/pkg/controller/chi/kube/pdb.go
+++ b/pkg/controller/chi/kube/pdb.go
@@ -24,7 +24,6 @@ import (
kube "k8s.io/client-go/kubernetes"
log "github.com/altinity/clickhouse-operator/pkg/announcer"
- "github.com/altinity/clickhouse-operator/pkg/chop"
"github.com/altinity/clickhouse-operator/pkg/controller"
"github.com/altinity/clickhouse-operator/pkg/controller/common/poller"
)
@@ -62,7 +61,7 @@ func (c *PDB) Remove(ctx context.Context, namespace, name string) error {
func (c *PDB) Delete(ctx context.Context, namespace, name string) error {
item := "PDB"
return poller.New(ctx, fmt.Sprintf("delete %s: %s/%s", item, namespace, name)).
- WithOptions(poller.NewOptions().FromConfig(chop.Config())).
+ WithOptions(poller.NewOptionsFromConfig()).
WithFunctions(&poller.Functions{
IsDone: func(_ctx context.Context, _ any) bool {
if err := c.Remove(ctx, namespace, name); err != nil {
diff --git a/pkg/controller/chi/kube/pod.go b/pkg/controller/chi/kube/pod.go
index ef987ffe7..b4bbb40ea 100644
--- a/pkg/controller/chi/kube/pod.go
+++ b/pkg/controller/chi/kube/pod.go
@@ -42,7 +42,7 @@ func NewPod(kubeClient kube.Interface, namer interfaces.INameManager) *Pod {
// Get gets a pod. Accepted types:
// 1. *apps.StatefulSet
// 2. *chop.Host
-func (c *Pod) Get(params ...any) (*core.Pod, error) {
+func (c *Pod) Get(ctx context.Context, params ...any) (*core.Pod, error) {
var name, namespace string
switch len(params) {
case 2:
@@ -65,29 +65,29 @@ func (c *Pod) Get(params ...any) (*core.Pod, error) {
default:
panic(any("incorrect number or params"))
}
- ctx := k8sCtx(controller.NewContext())
+ ctx = k8sCtx(ctx)
return c.kubeClient.CoreV1().Pods(namespace).Get(ctx, name, controller.NewGetOptions())
}
-func (c *Pod) GetRestartCounters(params ...any) (map[string]int, error) {
- pod, err := c.Get(params...)
+func (c *Pod) GetRestartCounters(ctx context.Context, params ...any) (map[string]int, error) {
+ pod, err := c.Get(ctx, params...)
if err != nil {
return nil, err
}
- return k8s.PodRestartCountersGet(pod), nil
+ return k8s.PodContainersRestartCountsGet(pod), nil
}
// GetAll gets all pods for provided entity
-func (c *Pod) GetAll(obj any) []*core.Pod {
+func (c *Pod) GetAll(ctx context.Context, obj any) []*core.Pod {
switch typed := obj.(type) {
case api.ICustomResource:
- return c.getPods(typed)
+ return c.getPods(ctx, typed)
case api.ICluster:
- return c.getPods(typed)
+ return c.getPods(ctx, typed)
case api.IShard:
- return c.getPods(typed)
+ return c.getPods(ctx, typed)
case *api.Host:
- return c.getPod(typed)
+ return c.getPod(ctx, typed)
default:
panic(any("unknown type"))
}
@@ -103,17 +103,17 @@ type IWalkHosts interface {
}
// getPods gets all pods of an entity
-func (c *Pod) getPods(walker IWalkHosts) (pods []*core.Pod) {
+func (c *Pod) getPods(ctx context.Context, walker IWalkHosts) (pods []*core.Pod) {
walker.WalkHosts(func(host *api.Host) error {
- pods = append(pods, c.getPod(host)...)
+ pods = append(pods, c.getPod(ctx, host)...)
return nil
})
return pods
}
// getPod gets all pods of an entity
-func (c *Pod) getPod(host *api.Host) (pods []*core.Pod) {
- if pod, err := c.Get(host); err == nil {
+func (c *Pod) getPod(ctx context.Context, host *api.Host) (pods []*core.Pod) {
+ if pod, err := c.Get(ctx, host); err == nil {
pods = append(pods, pod)
}
return pods
diff --git a/pkg/controller/chi/kube/secret.go b/pkg/controller/chi/kube/secret.go
index b6af0913d..9a4c8d4ee 100644
--- a/pkg/controller/chi/kube/secret.go
+++ b/pkg/controller/chi/kube/secret.go
@@ -25,7 +25,6 @@ import (
log "github.com/altinity/clickhouse-operator/pkg/announcer"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/chop"
"github.com/altinity/clickhouse-operator/pkg/controller"
"github.com/altinity/clickhouse-operator/pkg/controller/common/poller"
"github.com/altinity/clickhouse-operator/pkg/interfaces"
@@ -87,7 +86,7 @@ func (c *Secret) Remove(ctx context.Context, namespace, name string) error {
func (c *Secret) Delete(ctx context.Context, namespace, name string) error {
item := "Secret"
return poller.New(ctx, fmt.Sprintf("delete %s: %s/%s", item, namespace, name)).
- WithOptions(poller.NewOptions().FromConfig(chop.Config())).
+ WithOptions(poller.NewOptionsFromConfig()).
WithFunctions(&poller.Functions{
IsDone: func(_ctx context.Context, _ any) bool {
if err := c.Remove(ctx, namespace, name); err != nil {
diff --git a/pkg/controller/chi/kube/service.go b/pkg/controller/chi/kube/service.go
index 79e41312b..42d9ccb01 100644
--- a/pkg/controller/chi/kube/service.go
+++ b/pkg/controller/chi/kube/service.go
@@ -25,7 +25,6 @@ import (
log "github.com/altinity/clickhouse-operator/pkg/announcer"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/chop"
"github.com/altinity/clickhouse-operator/pkg/controller"
"github.com/altinity/clickhouse-operator/pkg/controller/common/poller"
"github.com/altinity/clickhouse-operator/pkg/interfaces"
@@ -87,7 +86,7 @@ func (c *Service) Remove(ctx context.Context, namespace, name string) error {
func (c *Service) Delete(ctx context.Context, namespace, name string) error {
item := "Service"
return poller.New(ctx, fmt.Sprintf("delete %s: %s/%s", item, namespace, name)).
- WithOptions(poller.NewOptions().FromConfig(chop.Config())).
+ WithOptions(poller.NewOptionsFromConfig()).
WithFunctions(&poller.Functions{
IsDone: func(_ctx context.Context, _ any) bool {
if err := c.Remove(ctx, namespace, name); err != nil {
diff --git a/pkg/controller/chi/kube/statesfulset.go b/pkg/controller/chi/kube/statesfulset.go
index c8bfdb517..63d4fde52 100644
--- a/pkg/controller/chi/kube/statesfulset.go
+++ b/pkg/controller/chi/kube/statesfulset.go
@@ -25,7 +25,6 @@ import (
log "github.com/altinity/clickhouse-operator/pkg/announcer"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/chop"
"github.com/altinity/clickhouse-operator/pkg/controller"
"github.com/altinity/clickhouse-operator/pkg/controller/common/poller"
"github.com/altinity/clickhouse-operator/pkg/interfaces"
@@ -65,10 +64,10 @@ func (c *STS) Get(ctx context.Context, params ...any) (*apps.StatefulSet, error)
name = c.namer.Name(interfaces.NameStatefulSet, obj)
namespace = typedObj.Runtime.Address.Namespace
default:
- panic("unknown type")
+ panic(any("unknown type"))
}
default:
- panic("unexpected number of args")
+ panic(any("unexpected number of args"))
}
ctx = k8sCtx(ctx)
return c.kubeClient.AppsV1().StatefulSets(namespace).Get(ctx, name, controller.NewGetOptions())
@@ -94,7 +93,7 @@ func (c *STS) Remove(ctx context.Context, namespace, name string) error {
func (c *STS) Delete(ctx context.Context, namespace, name string) error {
item := "StatefulSet"
return poller.New(ctx, fmt.Sprintf("delete %s: %s/%s", item, namespace, name)).
- WithOptions(poller.NewOptions().FromConfig(chop.Config())).
+ WithOptions(poller.NewOptionsFromConfig()).
WithFunctions(&poller.Functions{
IsDone: func(_ctx context.Context, _ any) bool {
if err := c.Remove(ctx, namespace, name); err != nil {
diff --git a/pkg/controller/chi/labeler/labeler.go b/pkg/controller/chi/labeler/labeler.go
index 2df7328be..87d0e9fe2 100644
--- a/pkg/controller/chi/labeler/labeler.go
+++ b/pkg/controller/chi/labeler/labeler.go
@@ -119,7 +119,7 @@ func (l *Labeler) LabelMyObjectsTree(ctx context.Context) error {
}
func (l *Labeler) labelPod(ctx context.Context, namespace, name string) (*core.Pod, error) {
- pod, err := l.pod.Get(namespace, name)
+ pod, err := l.pod.Get(ctx, namespace, name)
if err != nil {
log.V(1).M(namespace, name).F().Error("ERROR get Pod %s/%s %v", namespace, name, err)
return nil, err
@@ -250,7 +250,7 @@ func (l *Labeler) addLabels(labels map[string]string) map[string]string {
// appendLabelReadyOnPod appends Label "Ready" to the pod of the specified host
func (l *Labeler) appendLabelReadyOnPod(ctx context.Context, host *api.Host) error {
- pod, err := l.pod.Get(host)
+ pod, err := l.pod.Get(ctx, host)
if err != nil {
log.M(host).F().Error("FAIL get pod for host %s err:%v", host.Runtime.Address.NamespaceNameString(), err)
return err
@@ -273,7 +273,7 @@ func (l *Labeler) deleteLabelReadyOnPod(ctx context.Context, host *api.Host) err
if host == nil {
return nil
}
- pod, err := l.pod.Get(host)
+ pod, err := l.pod.Get(ctx, host)
if apiErrors.IsNotFound(err) {
// Pod may be missing in case, say, StatefulSet has 0 pods because CHI is stopped
// This is not an error, after all
diff --git a/pkg/controller/chi/worker-boilerplate.go b/pkg/controller/chi/worker-boilerplate.go
index 7fc385f0f..a56b0a7cb 100644
--- a/pkg/controller/chi/worker-boilerplate.go
+++ b/pkg/controller/chi/worker-boilerplate.go
@@ -117,7 +117,21 @@ func (w *worker) processReconcileChopConfig(cmd *cmd_queue.ReconcileChopConfig)
func (w *worker) processReconcileEndpoints(ctx context.Context, cmd *cmd_queue.ReconcileEndpoints) error {
switch cmd.Cmd {
case cmd_queue.ReconcileUpdate:
- return w.updateEndpoints(ctx, cmd.New)
+ w.a.V(1).M(cmd.New).F().Info("Reconcile Endpoints. %s/%s", cmd.New.Namespace, cmd.New.Name)
+ return w.updateEndpoints(ctx, cmd.New.GetObjectMeta())
+ }
+
+ // Unknown item type, don't know what to do with it
+ // Just skip it and behave like it never existed
+ utilRuntime.HandleError(fmt.Errorf("unexpected reconcile - %#v", cmd))
+ return nil
+}
+
+func (w *worker) processReconcileEndpointSlice(ctx context.Context, cmd *cmd_queue.ReconcileEndpointSlice) error {
+ switch cmd.Cmd {
+ case cmd_queue.ReconcileUpdate:
+ w.a.V(1).M(cmd.New).F().Info("Reconcile EndpointSlice. %s/%s Transition: '%s'=>'%s'", cmd.New.Namespace, cmd.New.Name, buildComparableEndpointAddresses(cmd.Old), buildComparableEndpointAddresses(cmd.New))
+ return w.updateEndpoints(ctx, cmd.New.GetObjectMeta())
}
// Unknown item type, don't know what to do with it
@@ -168,6 +182,8 @@ func (w *worker) processItem(ctx context.Context, item interface{}) error {
return w.processReconcileChopConfig(cmd)
case *cmd_queue.ReconcileEndpoints:
return w.processReconcileEndpoints(ctx, cmd)
+ case *cmd_queue.ReconcileEndpointSlice:
+ return w.processReconcileEndpointSlice(ctx, cmd)
case *cmd_queue.ReconcilePod:
return w.processReconcilePod(ctx, cmd)
}
diff --git a/pkg/controller/chi/worker-deleter.go b/pkg/controller/chi/worker-deleter.go
index 9713c7062..fc66b7505 100644
--- a/pkg/controller/chi/worker-deleter.go
+++ b/pkg/controller/chi/worker-deleter.go
@@ -200,37 +200,37 @@ func (w *worker) purgePDB(
func shouldPurgeStatefulSet(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
if reconcileFailedObjs.HasStatefulSet(m) {
- return cr.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetStatefulSet() == api.ObjectsCleanupDelete
+ return cr.GetReconcile().GetCleanup().GetReconcileFailedObjects().GetStatefulSet() == api.ObjectsCleanupDelete
}
- return cr.GetReconciling().GetCleanup().GetUnknownObjects().GetStatefulSet() == api.ObjectsCleanupDelete
+ return cr.GetReconcile().GetCleanup().GetUnknownObjects().GetStatefulSet() == api.ObjectsCleanupDelete
}
func shouldPurgePVC(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
if reconcileFailedObjs.HasPVC(m) {
- return cr.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetPVC() == api.ObjectsCleanupDelete
+ return cr.GetReconcile().GetCleanup().GetReconcileFailedObjects().GetPVC() == api.ObjectsCleanupDelete
}
- return cr.GetReconciling().GetCleanup().GetUnknownObjects().GetPVC() == api.ObjectsCleanupDelete
+ return cr.GetReconcile().GetCleanup().GetUnknownObjects().GetPVC() == api.ObjectsCleanupDelete
}
func shouldPurgeConfigMap(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
if reconcileFailedObjs.HasConfigMap(m) {
- return cr.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetConfigMap() == api.ObjectsCleanupDelete
+ return cr.GetReconcile().GetCleanup().GetReconcileFailedObjects().GetConfigMap() == api.ObjectsCleanupDelete
}
- return cr.GetReconciling().GetCleanup().GetUnknownObjects().GetConfigMap() == api.ObjectsCleanupDelete
+ return cr.GetReconcile().GetCleanup().GetUnknownObjects().GetConfigMap() == api.ObjectsCleanupDelete
}
func shouldPurgeService(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
if reconcileFailedObjs.HasService(m) {
- return cr.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetService() == api.ObjectsCleanupDelete
+ return cr.GetReconcile().GetCleanup().GetReconcileFailedObjects().GetService() == api.ObjectsCleanupDelete
}
- return cr.GetReconciling().GetCleanup().GetUnknownObjects().GetService() == api.ObjectsCleanupDelete
+ return cr.GetReconcile().GetCleanup().GetUnknownObjects().GetService() == api.ObjectsCleanupDelete
}
func shouldPurgeSecret(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
if reconcileFailedObjs.HasSecret(m) {
- return cr.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetSecret() == api.ObjectsCleanupDelete
+ return cr.GetReconcile().GetCleanup().GetReconcileFailedObjects().GetSecret() == api.ObjectsCleanupDelete
}
- return cr.GetReconciling().GetCleanup().GetUnknownObjects().GetSecret() == api.ObjectsCleanupDelete
+ return cr.GetReconcile().GetCleanup().GetUnknownObjects().GetSecret() == api.ObjectsCleanupDelete
}
func shouldPurgePDB(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
diff --git a/pkg/controller/chi/worker-reconciler-chi.go b/pkg/controller/chi/worker-reconciler-chi.go
index efdd10c24..32b3a1a0c 100644
--- a/pkg/controller/chi/worker-reconciler-chi.go
+++ b/pkg/controller/chi/worker-reconciler-chi.go
@@ -120,7 +120,7 @@ func (w *worker) buildCR(ctx context.Context, _cr *api.ClickHouseInstallation) *
common.LogOldAndNew("norm stage 1:", cr.GetAncestorT(), cr)
templates := w.buildTemplates(cr)
- ips := w.c.getPodsIPs(cr)
+ ips := w.c.getPodsIPs(ctx, cr)
w.a.V(1).M(cr).Info("IPs of the CR %s: len: %d %v", util.NamespacedName(cr), len(ips), ips)
if len(ips) > 0 || len(templates) > 0 {
// Rebuild CR with known list of templates and additional IPs
@@ -365,12 +365,13 @@ func (w *worker) reconcileHostStatefulSet(ctx context.Context, host *api.Host, o
w.a.V(1).M(host).F().Info("Reconcile host STS: %s. App version: %s", host.GetName(), host.Runtime.Version.Render())
// Start with force-restart host
- if w.shouldForceRestartHost(host) {
+ if w.shouldForceRestartHost(ctx, host) {
w.a.V(1).M(host).F().Info("Reconcile host STS force restart: %s", host.GetName())
_ = w.hostForceRestart(ctx, host, opts)
}
w.stsReconciler.PrepareHostStatefulSetWithStatus(ctx, host, host.IsStopped())
+ opts = w.prepareStsReconcileOptsWaitSection(host, opts)
// We are in place, where we can reconcile StatefulSet to desired configuration.
w.a.V(1).M(host).F().Info("Reconcile host STS: %s. Reconcile StatefulSet", host.GetName())
@@ -410,7 +411,7 @@ func (w *worker) hostSoftwareRestart(ctx context.Context, host *api.Host) error
w.a.V(1).M(host).F().Info("Host software restart start. Host: %s", host.GetName())
// Get restart counters - they'll be used to check restart success
- restarts, err := w.c.kube.Pod().(interfaces.IKubePodEx).GetRestartCounters(host)
+ restartCounters, err := w.c.kube.Pod().(interfaces.IKubePodEx).GetRestartCounters(ctx, host)
if err != nil {
w.a.V(1).M(host).F().Info("Host software restart abort 1. Host: %s err: %v", host.GetName(), err)
return err
@@ -425,7 +426,7 @@ func (w *worker) hostSoftwareRestart(ctx context.Context, host *api.Host) error
w.a.V(1).M(host).F().Info("Host software shutdown ok. Host: %s", host.GetName())
// Wait for restart counters to change
- err = w.waitHostRestart(ctx, host, restarts)
+ err = w.waitHostRestart(ctx, host, restartCounters)
if err != nil {
w.a.V(1).M(host).F().Info("Host software restart abort 3. Host: %s err: %v", host.GetName(), err)
return err
@@ -465,7 +466,7 @@ func (w *worker) hostSoftwareRestart(ctx context.Context, host *api.Host) error
w.a.V(1).M(host).F().Info("Host software version ok. Host: %s ", host.GetName())
// However, some containers within the pod may still have flapping problems and be in CrashLoopBackOff
- if w.isPodCrushed(host) {
+ if w.isPodCrushed(ctx, host) {
w.a.V(1).M(host).F().Info("Host software restart abort 8. Host: %s is crushed", host.GetName())
return fmt.Errorf("host is crushed")
}
@@ -817,6 +818,22 @@ func (w *worker) reconcileHostMain(ctx context.Context, host *api.Host) error {
return nil
}
+func (w *worker) prepareStsReconcileOptsWaitSection(host *api.Host, opts *statefulset.ReconcileOptions) *statefulset.ReconcileOptions {
+ if host.GetCluster().GetReconcile().Host.Wait.Probes.GetStartup().IsTrue() {
+ opts = opts.SetWaitUntilStarted()
+ w.a.V(1).
+ M(host).F().
+			Warning("Setting option SetWaitUntilStarted")
+ }
+ if host.GetCluster().GetReconcile().Host.Wait.Probes.GetReadiness().IsTrue() {
+ opts = opts.SetWaitUntilReady()
+ w.a.V(1).
+ M(host).F().
+ Warning("Setting option SetWaitUntilReady")
+ }
+ return opts
+}
+
func (w *worker) reconcileHostPVCs(ctx context.Context, host *api.Host) storage.ErrorDataPersistence {
return storage.NewStorageReconciler(
w.task,
diff --git a/pkg/controller/chi/worker-status-helpers.go b/pkg/controller/chi/worker-status-helpers.go
index d755edf5d..00823eec4 100644
--- a/pkg/controller/chi/worker-status-helpers.go
+++ b/pkg/controller/chi/worker-status-helpers.go
@@ -38,44 +38,44 @@ func (w *worker) isJustStarted() bool {
return time.Since(w.start) < timeToStart
}
-func (w *worker) isPodCrushed(host *api.Host) bool {
- if pod, err := w.c.kube.Pod().Get(host); err == nil {
+func (w *worker) isPodCrushed(ctx context.Context, host *api.Host) bool {
+ if pod, err := w.c.kube.Pod().Get(ctx, host); err == nil {
return k8s.PodHasCrushedContainers(pod)
}
return true
}
func (w *worker) isPodReady(ctx context.Context, host *api.Host) bool {
- if pod, err := w.c.kube.Pod().Get(host); err == nil {
+ if pod, err := w.c.kube.Pod().Get(ctx, host); err == nil {
return !k8s.PodHasNotReadyContainers(pod)
}
return false
}
func (w *worker) isPodStarted(ctx context.Context, host *api.Host) bool {
- if pod, err := w.c.kube.Pod().Get(host); err == nil {
+ if pod, err := w.c.kube.Pod().Get(ctx, host); err == nil {
return k8s.PodHasAllContainersStarted(pod)
}
return false
}
func (w *worker) isPodRunning(ctx context.Context, host *api.Host) bool {
- if pod, err := w.c.kube.Pod().Get(host); err == nil {
+ if pod, err := w.c.kube.Pod().Get(ctx, host); err == nil {
return k8s.PodPhaseIsRunning(pod)
}
return false
}
func (w *worker) isPodOK(ctx context.Context, host *api.Host) bool {
- if pod, err := w.c.kube.Pod().Get(host); err == nil {
+ if pod, err := w.c.kube.Pod().Get(ctx, host); err == nil {
return k8s.IsPodOK(pod)
}
return false
}
-func (w *worker) isPodRestarted(ctx context.Context, host *api.Host, start map[string]int) bool {
- cur, _ := w.c.kube.Pod().(interfaces.IKubePodEx).GetRestartCounters(host)
- return !util.MapsAreTheSame(start, cur)
+func (w *worker) isPodRestarted(ctx context.Context, host *api.Host, initialRestartCounters map[string]int) bool {
+ curRestartCounters, _ := w.c.kube.Pod().(interfaces.IKubePodEx).GetRestartCounters(ctx, host)
+ return !util.MapsAreTheSame(initialRestartCounters, curRestartCounters)
}
func (w *worker) doesHostHaveNoRunningQueries(ctx context.Context, host *api.Host) bool {
diff --git a/pkg/controller/chi/worker-wait-exclude-include-restart.go b/pkg/controller/chi/worker-wait-exclude-include-restart.go
index ab716c9bb..884b5c18f 100644
--- a/pkg/controller/chi/worker-wait-exclude-include-restart.go
+++ b/pkg/controller/chi/worker-wait-exclude-include-restart.go
@@ -55,7 +55,7 @@ func (w *worker) waitForIPAddresses(ctx context.Context, chi *api.ClickHouseInst
// cur.EnsureStatus().SetPodIPs(podIPs)
// and here
// c.Status.GetPodIPs()
- podIPs := w.c.getPodsIPs(chi)
+ podIPs := w.c.getPodsIPs(ctx, chi)
if len(podIPs) >= len(c.Status.GetPods()) {
l.Info("all IP addresses are in place")
// Stop polling
@@ -79,7 +79,7 @@ func (w *worker) excludeHost(ctx context.Context, host *api.Host) bool {
log.V(1).M(host).F().S().Info("exclude host start")
defer log.V(1).M(host).F().E().Info("exclude host end")
- if !w.shouldExcludeHost(host) {
+ if !w.shouldExcludeHost(ctx, host) {
w.a.V(1).
M(host).F().
Info("No need to exclude host from cluster. Host/shard/cluster: %d/%d/%s",
@@ -110,7 +110,7 @@ func (w *worker) completeQueries(ctx context.Context, host *api.Host) error {
return nil
}
-// shouldIncludeHost determines whether host to be included into cluster after reconciling
+// shouldIncludeHost determines whether host to be included into cluster after reconcile
func (w *worker) shouldIncludeHost(host *api.Host) bool {
switch {
case host.IsStopped():
@@ -158,17 +158,18 @@ func (w *worker) shouldWaitReplicationHost(host *api.Host) bool {
return true
case chop.Config().Reconcile.Host.Wait.Replicas.New.IsTrue():
- // New replicas are explicitly requested to wait for replication to catch-up.
-
+ // New replicas have personal catch-up requirements
if host.GetReconcileAttributes().GetStatus().Is(types.ObjectStatusCreated) {
w.a.V(1).
M(host).F().
- Info("This is a new host replica - need to catch-up")
+ Info("New replicas are explicitly requested to wait for replication to catch-up and this is a new host replica")
return true
}
- // This is not a new replica, it may have incomplete replication catch-up job still
+ // This is not a new replica.
+ // But this replica may have incomplete replication catch-up job still
+ // Check whether replication was already listed as caught-up earlier
if host.HasListedReplicaCaughtUp(w.c.namer.Name(interfaces.NameFQDN, host)) {
w.a.V(1).
M(host).F().
@@ -176,6 +177,7 @@ func (w *worker) shouldWaitReplicationHost(host *api.Host) bool {
return false
}
+ // Host was seen before, but replication is not listed as caught-up, need to finish the replication
w.a.V(1).
M(host).F().
Info("Host replica has never reached caught-up status, need to wait for replication to commence")
@@ -347,8 +349,8 @@ func (w *worker) catchReplicationLag(ctx context.Context, host *api.Host) error
return err
}
-// shouldExcludeHost determines whether host to be excluded from cluster before reconciling
-func (w *worker) shouldExcludeHost(host *api.Host) bool {
+// shouldExcludeHost determines whether host to be excluded from cluster before reconcile
+func (w *worker) shouldExcludeHost(ctx context.Context, host *api.Host) bool {
switch {
case host.IsStopped():
w.a.V(1).
@@ -371,7 +373,7 @@ func (w *worker) shouldExcludeHost(host *api.Host) bool {
host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
return false
- case w.shouldForceRestartHost(host):
+ case w.shouldForceRestartHost(ctx, host):
w.a.V(1).
M(host).F().
Info("Host should be restarted, need to exclude. Host/shard/cluster: %d/%d/%s",
@@ -405,13 +407,13 @@ func (w *worker) shouldExcludeHost(host *api.Host) bool {
func (w *worker) shouldWaitExcludeHost(host *api.Host) bool {
// Check CHI settings
switch {
- case host.GetCR().GetReconciling().IsReconcilingPolicyWait():
+ case host.GetCR().GetReconcile().IsReconcilingPolicyWait():
w.a.V(1).
M(host).F().
Info("IsReconcilingPolicyWait() need to wait to exclude host. Host/shard/cluster: %d/%d/%s",
host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
return true
- case host.GetCR().GetReconciling().IsReconcilingPolicyNoWait():
+ case host.GetCR().GetReconcile().IsReconcilingPolicyNoWait():
w.a.V(1).
M(host).F().
Info("IsReconcilingPolicyNoWait() need NOT to wait to exclude host. Host/shard/cluster: %d/%d/%s",
@@ -443,7 +445,7 @@ func (w *worker) shouldWaitQueries(host *api.Host) bool {
"Host/shard/cluster: %d/%d/%s",
host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
return true
- case host.GetCR().GetReconciling().IsReconcilingPolicyWait():
+ case host.GetCR().GetReconcile().IsReconcilingPolicyWait():
w.a.V(1).
M(host).F().
Info("Will wait for queries to complete on a host according to CHI 'reconciling.policy' setting. "+
@@ -474,10 +476,10 @@ func (w *worker) shouldWaitIncludeHostIntoClickHouseCluster(host *api.Host) bool
case host.GetShard().HostsCount() == 1:
// No need to wait one-host-shard
return false
- case host.GetCR().GetReconciling().IsReconcilingPolicyWait():
+ case host.GetCR().GetReconcile().IsReconcilingPolicyWait():
// Check CHI settings - explicitly requested to wait
return true
- case host.GetCR().GetReconciling().IsReconcilingPolicyNoWait():
+ case host.GetCR().GetReconcile().IsReconcilingPolicyNoWait():
// Check CHI settings - explicitly requested to not wait
return false
}
@@ -509,9 +511,9 @@ func (w *worker) waitHostHasNoReplicationDelay(ctx context.Context, host *api.Ho
}
// waitHostRestart
-func (w *worker) waitHostRestart(ctx context.Context, host *api.Host, start map[string]int) error {
+func (w *worker) waitHostRestart(ctx context.Context, host *api.Host, restartCounters map[string]int) error {
return domain.PollHost(ctx, host, func(ctx context.Context, host *api.Host) bool {
- return w.isPodRestarted(ctx, host, start)
+ return w.isPodRestarted(ctx, host, restartCounters)
})
}
diff --git a/pkg/controller/chi/worker.go b/pkg/controller/chi/worker.go
index 081e93691..3a8be8fd8 100644
--- a/pkg/controller/chi/worker.go
+++ b/pkg/controller/chi/worker.go
@@ -138,7 +138,7 @@ func (w *worker) newTask(new, old *api.ClickHouseInstallation) {
w.stsReconciler = statefulset.NewReconciler(
w.a,
w.task,
- domain.NewHostStatefulSetPoller(domain.NewStatefulSetPoller(w.c.kube), w.c.kube, w.c.ctrlLabeler),
+ domain.NewHostObjectsPoller(domain.NewHostObjectPoller(w.c.kube.STS()), domain.NewHostObjectPoller(w.c.kube.Pod()), w.c.ctrlLabeler),
w.c.namer,
labeler.New(new),
storage.NewStorageReconciler(w.task, w.c.namer, w.c.kube.Storage()),
@@ -148,7 +148,7 @@ func (w *worker) newTask(new, old *api.ClickHouseInstallation) {
}
// shouldForceRestartHost checks whether cluster requires hosts restart
-func (w *worker) shouldForceRestartHost(host *api.Host) bool {
+func (w *worker) shouldForceRestartHost(ctx context.Context, host *api.Host) bool {
switch {
case host.HasAncestor() && host.GetAncestor().IsStopped():
w.a.V(1).M(host).F().Info("Host ancestor is stopped, no restart applicable. Host: %s", host.GetName())
@@ -182,7 +182,7 @@ func (w *worker) shouldForceRestartHost(host *api.Host) bool {
w.a.V(1).M(host).F().Info("Config change(s) require host restart. Host: %s", host.GetName())
return true
- case host.Runtime.Version.IsUnknown() && w.isPodCrushed(host):
+ case host.Runtime.Version.IsUnknown() && w.isPodCrushed(ctx, host):
w.a.V(1).M(host).F().Info("Host with unknown version and in CrashLoopBackOff should be restarted. It most likely is unable to start due to bad config. Host: %s", host.GetName())
return true
@@ -222,10 +222,10 @@ func (w *worker) ensureFinalizer(ctx context.Context, chi *api.ClickHouseInstall
}
// updateEndpoints updates endpoints
-func (w *worker) updateEndpoints(ctx context.Context, ep *core.Endpoints) error {
+func (w *worker) updateEndpoints(ctx context.Context, m meta.Object) error {
_ = w.finalizeCR(
ctx,
- ep,
+ m,
types.UpdateStatusOptions{
TolerateAbsence: true,
CopyStatusOptions: types.CopyStatusOptions{
diff --git a/pkg/controller/chk/controller-getter.go b/pkg/controller/chk/controller-getter.go
index c81241c51..d85de82f4 100644
--- a/pkg/controller/chk/controller-getter.go
+++ b/pkg/controller/chk/controller-getter.go
@@ -15,6 +15,7 @@
package chk
import (
+ "context"
"fmt"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -27,13 +28,13 @@ import (
)
// getPodsIPs gets all pod IPs
-func (c *Controller) getPodsIPs(obj interface{}) (ips []string) {
+func (c *Controller) getPodsIPs(ctx context.Context, obj interface{}) (ips []string) {
l := log.V(3).M(obj).F()
l.S().Info("looking for pods IPs")
defer l.E().Info("looking for pods IPs")
- for _, pod := range c.kube.Pod().GetAll(obj) {
+ for _, pod := range c.kube.Pod().GetAll(ctx, obj) {
if ip := pod.Status.PodIP; ip == "" {
l.Warning("Pod NO IP address found. Pod: %s", util.NamespacedName(pod))
} else {
diff --git a/pkg/controller/chk/kube/pod.go b/pkg/controller/chk/kube/pod.go
index ecfaa9d2c..35bca8581 100644
--- a/pkg/controller/chk/kube/pod.go
+++ b/pkg/controller/chk/kube/pod.go
@@ -24,7 +24,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/controller"
"github.com/altinity/clickhouse-operator/pkg/interfaces"
)
@@ -43,7 +42,7 @@ func NewPod(kubeClient client.Client, namer interfaces.INameManager) *Pod {
// Get gets pod. Accepted types:
// 1. *apps.StatefulSet
// 2. *chop.Host
-func (c *Pod) Get(params ...any) (*core.Pod, error) {
+func (c *Pod) Get(ctx context.Context, params ...any) (*core.Pod, error) {
var name, namespace string
switch len(params) {
case 2:
@@ -67,7 +66,7 @@ func (c *Pod) Get(params ...any) (*core.Pod, error) {
panic(any("incorrect number or params"))
}
pod := &core.Pod{}
- err := c.kubeClient.Get(controller.NewContext(), types.NamespacedName{
+ err := c.kubeClient.Get(ctx, types.NamespacedName{
Namespace: namespace,
Name: name,
}, pod)
@@ -75,16 +74,16 @@ func (c *Pod) Get(params ...any) (*core.Pod, error) {
}
// GetAll gets all pods for provided entity
-func (c *Pod) GetAll(obj any) []*core.Pod {
+func (c *Pod) GetAll(ctx context.Context, obj any) []*core.Pod {
switch typed := obj.(type) {
case api.ICustomResource:
- return c.getPodsOfCR(typed)
+ return c.getPodsOfCR(ctx, typed)
case api.ICluster:
- return c.getPodsOfCluster(typed)
+ return c.getPodsOfCluster(ctx, typed)
case api.IShard:
- return c.getPodsOfShard(typed)
+ return c.getPodsOfShard(ctx, typed)
case *api.Host:
- if pod, err := c.Get(typed); err == nil {
+ if pod, err := c.Get(ctx, typed); err == nil {
return []*core.Pod{
pod,
}
@@ -101,9 +100,9 @@ func (c *Pod) Update(ctx context.Context, pod *core.Pod) (*core.Pod, error) {
}
// getPodsOfCluster gets all pods in a cluster
-func (c *Pod) getPodsOfCluster(cluster api.ICluster) (pods []*core.Pod) {
+func (c *Pod) getPodsOfCluster(ctx context.Context, cluster api.ICluster) (pods []*core.Pod) {
cluster.WalkHosts(func(host *api.Host) error {
- if pod, err := c.Get(host); err == nil {
+ if pod, err := c.Get(ctx, host); err == nil {
pods = append(pods, pod)
}
return nil
@@ -112,9 +111,9 @@ func (c *Pod) getPodsOfCluster(cluster api.ICluster) (pods []*core.Pod) {
}
// getPodsOfShard gets all pods in a shard
-func (c *Pod) getPodsOfShard(shard api.IShard) (pods []*core.Pod) {
+func (c *Pod) getPodsOfShard(ctx context.Context, shard api.IShard) (pods []*core.Pod) {
shard.WalkHosts(func(host *api.Host) error {
- if pod, err := c.Get(host); err == nil {
+ if pod, err := c.Get(ctx, host); err == nil {
pods = append(pods, pod)
}
return nil
@@ -123,9 +122,9 @@ func (c *Pod) getPodsOfShard(shard api.IShard) (pods []*core.Pod) {
}
// getPodsOfCR gets all pods in a CHI
-func (c *Pod) getPodsOfCR(cr api.ICustomResource) (pods []*core.Pod) {
+func (c *Pod) getPodsOfCR(ctx context.Context, cr api.ICustomResource) (pods []*core.Pod) {
cr.WalkHosts(func(host *api.Host) error {
- if pod, err := c.Get(host); err == nil {
+ if pod, err := c.Get(ctx, host); err == nil {
pods = append(pods, pod)
}
return nil
diff --git a/pkg/controller/chk/worker-deleter.go b/pkg/controller/chk/worker-deleter.go
index 19e77c1b5..7b13906a1 100644
--- a/pkg/controller/chk/worker-deleter.go
+++ b/pkg/controller/chk/worker-deleter.go
@@ -176,37 +176,37 @@ func (w *worker) purgePDB(
func shouldPurgeStatefulSet(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
if reconcileFailedObjs.HasStatefulSet(m) {
- return cr.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetStatefulSet() == api.ObjectsCleanupDelete
+ return cr.GetReconcile().GetCleanup().GetReconcileFailedObjects().GetStatefulSet() == api.ObjectsCleanupDelete
}
- return cr.GetReconciling().GetCleanup().GetUnknownObjects().GetStatefulSet() == api.ObjectsCleanupDelete
+ return cr.GetReconcile().GetCleanup().GetUnknownObjects().GetStatefulSet() == api.ObjectsCleanupDelete
}
func shouldPurgePVC(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
if reconcileFailedObjs.HasPVC(m) {
- return cr.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetPVC() == api.ObjectsCleanupDelete
+ return cr.GetReconcile().GetCleanup().GetReconcileFailedObjects().GetPVC() == api.ObjectsCleanupDelete
}
- return cr.GetReconciling().GetCleanup().GetUnknownObjects().GetPVC() == api.ObjectsCleanupDelete
+ return cr.GetReconcile().GetCleanup().GetUnknownObjects().GetPVC() == api.ObjectsCleanupDelete
}
func shouldPurgeConfigMap(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
if reconcileFailedObjs.HasConfigMap(m) {
- return cr.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetConfigMap() == api.ObjectsCleanupDelete
+ return cr.GetReconcile().GetCleanup().GetReconcileFailedObjects().GetConfigMap() == api.ObjectsCleanupDelete
}
- return cr.GetReconciling().GetCleanup().GetUnknownObjects().GetConfigMap() == api.ObjectsCleanupDelete
+ return cr.GetReconcile().GetCleanup().GetUnknownObjects().GetConfigMap() == api.ObjectsCleanupDelete
}
func shouldPurgeService(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
if reconcileFailedObjs.HasService(m) {
- return cr.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetService() == api.ObjectsCleanupDelete
+ return cr.GetReconcile().GetCleanup().GetReconcileFailedObjects().GetService() == api.ObjectsCleanupDelete
}
- return cr.GetReconciling().GetCleanup().GetUnknownObjects().GetService() == api.ObjectsCleanupDelete
+ return cr.GetReconcile().GetCleanup().GetUnknownObjects().GetService() == api.ObjectsCleanupDelete
}
func shouldPurgeSecret(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
if reconcileFailedObjs.HasSecret(m) {
- return cr.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetSecret() == api.ObjectsCleanupDelete
+ return cr.GetReconcile().GetCleanup().GetReconcileFailedObjects().GetSecret() == api.ObjectsCleanupDelete
}
- return cr.GetReconciling().GetCleanup().GetUnknownObjects().GetSecret() == api.ObjectsCleanupDelete
+ return cr.GetReconcile().GetCleanup().GetUnknownObjects().GetSecret() == api.ObjectsCleanupDelete
}
func shouldPurgePDB(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
diff --git a/pkg/controller/chk/worker-exclude-include-wait.go b/pkg/controller/chk/worker-exclude-include-wait.go
index 6b8136aac..de2e83be8 100644
--- a/pkg/controller/chk/worker-exclude-include-wait.go
+++ b/pkg/controller/chk/worker-exclude-include-wait.go
@@ -43,7 +43,7 @@ func (w *worker) waitForIPAddresses(ctx context.Context, chk *apiChk.ClickHouseK
// cur.EnsureStatus().SetPodIPs(podIPs)
// and here
// c.Status.GetPodIPs()
- podIPs := w.c.getPodsIPs(chk)
+ podIPs := w.c.getPodsIPs(ctx, chk)
if len(podIPs) >= len(c.Status.GetPods()) {
// Stop polling
w.a.V(1).M(c).Info("all IP addresses are in place")
@@ -60,7 +60,7 @@ func (w *worker) waitForIPAddresses(ctx context.Context, chk *apiChk.ClickHouseK
})
}
-// shouldIncludeHost determines whether host to be included into cluster after reconciling
+// shouldIncludeHost determines whether host to be included into cluster after reconcile
func (w *worker) shouldIncludeHost(host *api.Host) bool {
switch {
case host.IsStopped():
diff --git a/pkg/controller/chk/worker-reconciler-chk.go b/pkg/controller/chk/worker-reconciler-chk.go
index 5e69770cd..56afe357e 100644
--- a/pkg/controller/chk/worker-reconciler-chk.go
+++ b/pkg/controller/chk/worker-reconciler-chk.go
@@ -99,7 +99,7 @@ func (w *worker) buildCR(ctx context.Context, _cr *apiChk.ClickHouseKeeperInstal
common.LogOldAndNew("norm stage 1:", cr.GetAncestorT(), cr)
templates := w.buildTemplates(cr)
- ips := w.c.getPodsIPs(cr)
+ ips := w.c.getPodsIPs(ctx, cr)
w.a.V(1).M(cr).Info("IPs of the CR %s: len: %d %v", util.NamespacedName(cr), len(ips), ips)
if len(ips) > 0 || len(templates) > 0 {
// Rebuild CR with known list of templates and additional IPs
@@ -348,7 +348,7 @@ func (w *worker) reconcileHostStatefulSet(ctx context.Context, host *api.Host, o
w.a.V(1).M(host).F().Info("Reconcile host: %s. App version: %s", host.GetName(), version)
// In case we have to force-restart host
// We'll do it via replicas: 0 in StatefulSet.
- if w.shouldForceRestartHost(host) {
+ if w.shouldForceRestartHost(ctx, host) {
w.a.V(1).M(host).F().Info("Reconcile host: %s. Shutting host down due to force restart", host.GetName())
w.stsReconciler.PrepareHostStatefulSetWithStatus(ctx, host, true)
_ = w.stsReconciler.ReconcileStatefulSet(ctx, host, false, opts)
diff --git a/pkg/controller/chk/worker-status-helpers.go b/pkg/controller/chk/worker-status-helpers.go
index 0e14e1ea8..90e2ba556 100644
--- a/pkg/controller/chk/worker-status-helpers.go
+++ b/pkg/controller/chk/worker-status-helpers.go
@@ -15,13 +15,15 @@
package chk
import (
+ "context"
+
apiChk "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
"github.com/altinity/clickhouse-operator/pkg/model/k8s"
)
-func (w *worker) isPodCrushed(host *api.Host) bool {
- if pod, err := w.c.kube.Pod().Get(host); err == nil {
+func (w *worker) isPodCrushed(ctx context.Context, host *api.Host) bool {
+ if pod, err := w.c.kube.Pod().Get(ctx, host); err == nil {
return k8s.PodHasCrushedContainers(pod)
}
return true
diff --git a/pkg/controller/chk/worker.go b/pkg/controller/chk/worker.go
index 1ca6efe93..57c990fa5 100644
--- a/pkg/controller/chk/worker.go
+++ b/pkg/controller/chk/worker.go
@@ -113,8 +113,7 @@ func (w *worker) newTask(new, old *apiChk.ClickHouseKeeperInstallation) {
w.stsReconciler = statefulset.NewReconciler(
w.a,
w.task,
- //poller.NewHostStatefulSetPoller(poller.NewStatefulSetPoller(w.c.kube), w.c.kube, w.c.labeler),
- domain.NewHostStatefulSetPoller(domain.NewStatefulSetPoller(w.c.kube), w.c.kube, nil),
+ domain.NewHostObjectsPoller(domain.NewHostObjectPoller(w.c.kube.STS()), domain.NewHostObjectPoller(w.c.kube.Pod()), nil),
w.c.namer,
labeler.New(new),
storage.NewStorageReconciler(w.task, w.c.namer, w.c.kube.Storage()),
@@ -124,7 +123,7 @@ func (w *worker) newTask(new, old *apiChk.ClickHouseKeeperInstallation) {
}
// shouldForceRestartHost checks whether cluster requires hosts restart
-func (w *worker) shouldForceRestartHost(host *api.Host) bool {
+func (w *worker) shouldForceRestartHost(ctx context.Context, host *api.Host) bool {
switch {
case host.HasAncestor() && host.GetAncestor().IsStopped():
w.a.V(1).M(host).F().Info("Host ancestor is stopped, no restart applicable. Host: %s", host.GetName())
@@ -158,7 +157,7 @@ func (w *worker) shouldForceRestartHost(host *api.Host) bool {
w.a.V(1).M(host).F().Info("Config change(s) require host restart. Host: %s", host.GetName())
return true
- case host.Runtime.Version.IsUnknown() && w.isPodCrushed(host):
+ case host.Runtime.Version.IsUnknown() && w.isPodCrushed(ctx, host):
w.a.V(1).M(host).F().Info("Host with unknown version and in CrashLoopBackOff should be restarted. It most likely is unable to start due to bad config. Host: %s", host.GetName())
return true
@@ -204,7 +203,7 @@ func (w *worker) finalizeReconcileAndMarkCompleted(ctx context.Context, _cr *api
// Update CHI object
if chi, err := w.createCRFromObjectMeta(_cr, true, commonNormalizer.NewOptions[apiChk.ClickHouseKeeperInstallation]()); err == nil {
w.a.V(1).M(chi).Info("updating endpoints for CR-2 %s", chi.Name)
- ips := w.c.getPodsIPs(chi)
+ ips := w.c.getPodsIPs(ctx, chi)
w.a.V(1).M(chi).Info("IPs of the CR-2 finalize reconcile %s/%s: len: %d %v", chi.Namespace, chi.Name, len(ips), ips)
opts := commonNormalizer.NewOptions[apiChk.ClickHouseKeeperInstallation]()
opts.DefaultUserAdditionalIPs = ips
diff --git a/pkg/controller/common/poller/domain/poller-host-object.go b/pkg/controller/common/poller/domain/poller-host-object.go
new file mode 100644
index 000000000..07eb78a1f
--- /dev/null
+++ b/pkg/controller/common/poller/domain/poller-host-object.go
@@ -0,0 +1,73 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package domain
+
+import (
+ "context"
+ "fmt"
+
+ apiErrors "k8s.io/apimachinery/pkg/api/errors"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/poller"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+type polledObjectGetter[TypeToGet any] interface {
+ Get(ctx context.Context, params ...any) (*TypeToGet, error)
+}
+
+type HostObjectPoller[TypeToPoll any] struct {
+ getter polledObjectGetter[TypeToPoll]
+}
+
+func NewHostObjectPoller[TypeToPoll any](getter polledObjectGetter[TypeToPoll]) *HostObjectPoller[TypeToPoll] {
+ return &HostObjectPoller[TypeToPoll]{
+ getter: getter,
+ }
+}
+
+// Poll polls host's object
+func (p *HostObjectPoller[TypeToPoll]) Poll(
+ ctx context.Context,
+ host *api.Host,
+ isDoneFn func(context.Context, *TypeToPoll) bool,
+ _opts ...*poller.Options,
+) error {
+ if util.IsContextDone(ctx) {
+ log.V(1).Info("poll is aborted")
+ return nil
+ }
+
+ caption := fmt.Sprintf("%s/%s", host.Runtime.Address.Namespace, host.Runtime.Address.HostName)
+ opts := poller.NewOptionsFromConfig(_opts...)
+ functions := &poller.Functions{
+ Get: func(_ctx context.Context) (any, error) {
+ return p.getter.Get(ctx, host)
+ },
+ IsDone: func(_ctx context.Context, a any) bool {
+ return isDoneFn(_ctx, a.(*TypeToPoll))
+ },
+ ShouldContinueOnGetError: func(_ctx context.Context, _ any, e error) bool {
+ return apiErrors.IsNotFound(e)
+ },
+ }
+
+ return poller.New(ctx, caption).
+ WithOptions(opts).
+ WithFunctions(functions).
+ Poll()
+}
diff --git a/pkg/controller/common/poller/domain/poller-host-objects.go b/pkg/controller/common/poller/domain/poller-host-objects.go
new file mode 100644
index 000000000..49f03089e
--- /dev/null
+++ b/pkg/controller/common/poller/domain/poller-host-objects.go
@@ -0,0 +1,123 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package domain
+
+import (
+ "context"
+
+ apps "k8s.io/api/apps/v1"
+ core "k8s.io/api/core/v1"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/model/k8s"
+)
+
+type readyMarkDeleter interface {
+ DeleteReadyMarkOnPodAndService(ctx context.Context, host *api.Host) error
+}
+
+type HostObjectsPoller struct {
+ stsPoller *HostObjectPoller[apps.StatefulSet]
+ podPoller *HostObjectPoller[core.Pod]
+ readyMarkDeleter readyMarkDeleter
+}
+
+// NewHostObjectsPoller creates new HostObjectsPoller
+func NewHostObjectsPoller(
+ stsPoller *HostObjectPoller[apps.StatefulSet],
+ podPoller *HostObjectPoller[core.Pod],
+ readyMarkDeleter readyMarkDeleter,
+) *HostObjectsPoller {
+ return &HostObjectsPoller{
+ stsPoller: stsPoller,
+ podPoller: podPoller,
+ readyMarkDeleter: readyMarkDeleter,
+ }
+}
+
+// WaitHostPodStarted polls host's StatefulSet until reconcile completes, then polls the Pod until all containers are started
+func (p *HostObjectsPoller) WaitHostPodStarted(ctx context.Context, host *api.Host) error {
+ log.V(2).F().Info("Wait for StatefulSet to reach target generation")
+ err := p.stsPoller.Poll(
+ ctx,
+ host,
+ func(_ctx context.Context, sts *apps.StatefulSet) bool {
+ if sts == nil {
+ return false
+ }
+ _ = p.readyMarkDeleter.DeleteReadyMarkOnPodAndService(_ctx, host)
+ return k8s.IsStatefulSetReconcileCompleted(sts)
+ },
+ )
+ if err != nil {
+ log.V(1).F().Warning("FAILED wait for StatefulSet to reach generation")
+ return err
+ }
+
+ log.V(2).F().Info("Wait Pod to reach started status")
+ err = p.podPoller.Poll(
+ ctx,
+ host,
+ func(_ctx context.Context, pod *core.Pod) bool {
+ _ = p.readyMarkDeleter.DeleteReadyMarkOnPodAndService(_ctx, host)
+ return k8s.PodHasAllContainersStarted(pod)
+ },
+ )
+ if err != nil {
+ log.V(1).F().Warning("FAILED wait Pod to reach started status")
+ return err
+ }
+
+ log.V(2).F().Info("Wait Pod to reach started status completed OK")
+ return nil
+}
+
+// WaitHostStatefulSetReady polls host's StatefulSet until it is ready
+func (p *HostObjectsPoller) WaitHostStatefulSetReady(ctx context.Context, host *api.Host) error {
+ log.V(2).F().Info("Wait for StatefulSet to reach target generation")
+ err := p.stsPoller.Poll(
+ ctx,
+ host,
+ func(_ctx context.Context, sts *apps.StatefulSet) bool {
+ if sts == nil {
+ return false
+ }
+ _ = p.readyMarkDeleter.DeleteReadyMarkOnPodAndService(_ctx, host)
+ return k8s.IsStatefulSetReconcileCompleted(sts)
+ },
+ )
+ if err != nil {
+ log.V(1).F().Warning("FAILED wait for StatefulSet to reach generation")
+ return err
+ }
+
+ log.V(2).F().Info("Wait StatefulSet to reach ready status")
+ err = p.stsPoller.Poll(
+ ctx,
+ host,
+ func(_ctx context.Context, sts *apps.StatefulSet) bool {
+ _ = p.readyMarkDeleter.DeleteReadyMarkOnPodAndService(_ctx, host)
+ return k8s.IsStatefulSetReady(sts)
+ },
+ )
+ if err != nil {
+ log.V(1).F().Warning("FAILED wait StatefulSet to reach ready status")
+ return err
+ }
+
+ log.V(2).F().Info("Wait StatefulSet to reach ready status completed OK")
+ return nil
+}
diff --git a/pkg/controller/common/poller/domain/poller-host-statefulset.go b/pkg/controller/common/poller/domain/poller-host-statefulset.go
deleted file mode 100644
index 4afe5e916..000000000
--- a/pkg/controller/common/poller/domain/poller-host-statefulset.go
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package domain
-
-import (
- "context"
-
- apps "k8s.io/api/apps/v1"
-
- log "github.com/altinity/clickhouse-operator/pkg/announcer"
- api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/interfaces"
- "github.com/altinity/clickhouse-operator/pkg/model/k8s"
-)
-
-type readyMarkDeleter interface {
- DeleteReadyMarkOnPodAndService(ctx context.Context, host *api.Host) error
-}
-
-// HostStatefulSetPoller enriches StatefulSet poller with host capabilities
-type HostStatefulSetPoller struct {
- *StatefulSetPoller
- interfaces.IKubeSTS
- readyMarkDeleter
-}
-
-// NewHostStatefulSetPoller creates new HostStatefulSetPoller from StatefulSet poller
-func NewHostStatefulSetPoller(poller *StatefulSetPoller, kube interfaces.IKube, labeler readyMarkDeleter) *HostStatefulSetPoller {
- return &HostStatefulSetPoller{
- StatefulSetPoller: poller,
- IKubeSTS: kube.STS(),
- readyMarkDeleter: labeler,
- }
-}
-
-// WaitHostStatefulSetReady polls host's StatefulSet until it is ready
-func (p *HostStatefulSetPoller) WaitHostStatefulSetReady(ctx context.Context, host *api.Host) error {
- log.V(2).F().Info("Wait for StatefulSet to reach generation")
- err := p.PollHostStatefulSet(
- ctx,
- host,
- func(_ctx context.Context, sts *apps.StatefulSet) bool {
- if sts == nil {
- return false
- }
- p.deleteReadyMark(_ctx, host)
- return k8s.IsStatefulSetGeneration(sts, sts.Generation)
- },
- func(_ctx context.Context) {
- p.deleteReadyMark(_ctx, host)
- },
- )
- if err != nil {
- log.V(1).F().Warning("FAILED wait for StatefulSet to reach generation")
- return err
- }
-
- log.V(2).F().Info("Wait StatefulSet to reach ready status")
- err = p.PollHostStatefulSet(
- ctx,
- host,
- func(_ctx context.Context, sts *apps.StatefulSet) bool {
- p.deleteReadyMark(_ctx, host)
- return k8s.IsStatefulSetReady(sts)
- },
- func(_ctx context.Context) {
- p.deleteReadyMark(_ctx, host)
- },
- )
- if err != nil {
- log.V(1).F().Warning("FAILED wait StatefulSet to reach ready status")
- return err
- }
-
- return nil
-}
-
-//// waitHostNotReady polls host's StatefulSet for not exists or not ready
-//func (c *HostStatefulSetPoller) WaitHostNotReady(ctx context.Context, host *api.Host) error {
-// err := c.PollHostStatefulSet(
-// ctx,
-// host,
-// // Since we are waiting for host to be nopt readylet's assyme that it should exist already
-// // and thus let's set GetErrorTimeout to zero, since we are not expecting getter function
-// // to return any errors
-// poller.NewPollerOptions().
-// FromConfig(chop.Config()).
-// SetGetErrorTimeout(0),
-// func(_ context.Context, sts *apps.StatefulSet) bool {
-// return k8s.IsStatefulSetNotReady(sts)
-// },
-// nil,
-// )
-// if apiErrors.IsNotFound(err) {
-// err = nil
-// }
-//
-// return err
-//}
-
-//// WaitHostStatefulSetDeleted polls host's StatefulSet until it is not available
-//func (p *HostStatefulSetPoller) WaitHostStatefulSetDeleted(host *api.Host) {
-// for {
-// // TODO
-// // Probably there would be better way to wait until k8s reported StatefulSet deleted
-// if _, err := p.IKubeSTS.Get(context.TODO(), host); err == nil {
-// log.V(2).Info("cache NOT yet synced")
-// time.Sleep(15 * time.Second)
-// } else {
-// log.V(1).Info("cache synced")
-// return
-// }
-// }
-//}
-
-func (p *HostStatefulSetPoller) deleteReadyMark(ctx context.Context, host *api.Host) {
- if p == nil {
- return
- }
- if p.readyMarkDeleter == nil {
- log.V(3).F().Info("no mark deleter specified")
- return
- }
-
- log.V(3).F().Info("Has mark deleter specified")
- _ = p.readyMarkDeleter.DeleteReadyMarkOnPodAndService(ctx, host)
-}
diff --git a/pkg/controller/common/poller/domain/poller-host.go b/pkg/controller/common/poller/domain/poller-host.go
index d0ff2f570..70c5d3665 100644
--- a/pkg/controller/common/poller/domain/poller-host.go
+++ b/pkg/controller/common/poller/domain/poller-host.go
@@ -21,7 +21,6 @@ import (
log "github.com/altinity/clickhouse-operator/pkg/announcer"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/chop"
"github.com/altinity/clickhouse-operator/pkg/controller/common/poller"
"github.com/altinity/clickhouse-operator/pkg/util"
)
@@ -30,24 +29,24 @@ import (
func PollHost(
ctx context.Context,
host *api.Host,
- isDoneFn func(ctx context.Context, host *api.Host) bool,
+ isDoneFn func(context.Context, *api.Host) bool,
_opts ...*poller.Options,
) error {
if util.IsContextDone(ctx) {
- log.V(1).Info("poll host is done")
+ log.V(1).Info("poll is aborted")
return nil
}
- opts := poller.NewOptions().FromConfig(chop.Config())
- for _, opt := range _opts {
- opts = opts.Merge(opt)
- }
+ caption := fmt.Sprintf("%s/%s", host.Runtime.Address.Namespace, host.Runtime.Address.HostName)
+ opts := poller.NewOptionsFromConfig(_opts...)
functions := &poller.Functions{
+ Get: func(_ctx context.Context) (any, error) {
+ return nil, nil
+ },
IsDone: func(_ctx context.Context, _ any) bool {
return isDoneFn(_ctx, host)
},
}
- caption := fmt.Sprintf("%s/%s", host.Runtime.Address.Namespace, host.Runtime.Address.HostName)
return poller.New(ctx, caption).
WithOptions(opts).
diff --git a/pkg/controller/common/poller/domain/poller-statefulset.go b/pkg/controller/common/poller/domain/poller-statefulset.go
deleted file mode 100644
index 396735a90..000000000
--- a/pkg/controller/common/poller/domain/poller-statefulset.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package domain
-
-import (
- "context"
- "fmt"
-
- apps "k8s.io/api/apps/v1"
- apiErrors "k8s.io/apimachinery/pkg/api/errors"
-
- log "github.com/altinity/clickhouse-operator/pkg/announcer"
- api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/chop"
- "github.com/altinity/clickhouse-operator/pkg/controller/common/poller"
- "github.com/altinity/clickhouse-operator/pkg/interfaces"
- "github.com/altinity/clickhouse-operator/pkg/util"
-)
-
-type StatefulSetPoller struct {
- kubeSTS interfaces.IKubeSTS
-}
-
-func NewStatefulSetPoller(kube interfaces.IKube) *StatefulSetPoller {
- return &StatefulSetPoller{
- kubeSTS: kube.STS(),
- }
-}
-
-// pollHostStatefulSet polls host's StatefulSet
-func (p *StatefulSetPoller) PollHostStatefulSet(
- ctx context.Context,
- host *api.Host,
- isDoneFn func(context.Context, *apps.StatefulSet) bool,
- backFn func(context.Context),
-) error {
- if util.IsContextDone(ctx) {
- log.V(11).Info("poll is aborted")
- return nil
- }
-
- return poller.New(
- ctx,
- fmt.Sprintf("%s/%s", host.Runtime.Address.Namespace, host.Runtime.Address.StatefulSet),
- ).WithOptions(
- poller.NewOptions().FromConfig(chop.Config()),
- ).WithFunctions(
- &poller.Functions{
- Get: func(_ctx context.Context) (any, error) {
- return p.kubeSTS.Get(ctx, host)
- },
- IsDone: func(_ctx context.Context, a any) bool {
- return isDoneFn(_ctx, a.(*apps.StatefulSet))
- },
- ShouldContinue: func(_ctx context.Context, _ any, e error) bool {
- return apiErrors.IsNotFound(e)
- },
- },
- ).WithBackground(
- &poller.BackgroundFunctions{
- F: backFn,
- },
- ).Poll()
-}
diff --git a/pkg/controller/common/poller/poller-functions.go b/pkg/controller/common/poller/poller-functions.go
index a094e88ac..302ad44c4 100644
--- a/pkg/controller/common/poller/poller-functions.go
+++ b/pkg/controller/common/poller/poller-functions.go
@@ -20,9 +20,10 @@ import (
)
type Functions struct {
- Get func(context.Context) (any, error)
- IsDone func(context.Context, any) bool
- ShouldContinue func(context.Context, any, error) bool
+ Get func(context.Context) (any, error)
+ IsDone func(context.Context, any) bool
+ ShouldContinueOnGetError func(context.Context, any, error) bool
+ Background func(context.Context)
}
func (p *Functions) CallGet(ctx context.Context) (any, error) {
@@ -49,12 +50,18 @@ func (p *Functions) CallShouldContinue(ctx context.Context, a any, e error) bool
if p == nil {
return false
}
- if p.ShouldContinue == nil {
+ if p.ShouldContinueOnGetError == nil {
return false
}
- return p.ShouldContinue(ctx, a, e)
+ return p.ShouldContinueOnGetError(ctx, a, e)
}
-type BackgroundFunctions struct {
- F func(context.Context)
+func (p *Functions) CallBackground(ctx context.Context) {
+ if p == nil {
+ return
+ }
+ if p.Background == nil {
+ return
+ }
+ p.Background(ctx)
}
diff --git a/pkg/controller/common/poller/poller-options.go b/pkg/controller/common/poller/poller-options.go
index 3c95a43b7..7a7803204 100644
--- a/pkg/controller/common/poller/poller-options.go
+++ b/pkg/controller/common/poller/poller-options.go
@@ -19,6 +19,7 @@ import (
"time"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/chop"
)
const (
@@ -40,6 +41,14 @@ func NewOptions() *Options {
return &Options{}
}
+func NewOptionsFromConfig(extraOpts ...*Options) *Options {
+ opts := NewOptions().FromConfig(chop.Config())
+ for _, opt := range extraOpts {
+ opts = opts.Merge(opt)
+ }
+ return opts
+}
+
// Ensure ensures poll options do exist
func (o *Options) Ensure() *Options {
if o == nil {
diff --git a/pkg/controller/common/poller/poller.go b/pkg/controller/common/poller/poller.go
index 06f38904b..4dc7630ec 100644
--- a/pkg/controller/common/poller/poller.go
+++ b/pkg/controller/common/poller/poller.go
@@ -28,15 +28,13 @@ type Poller interface {
Poll() error
WithOptions(opts *Options) Poller
WithFunctions(functions *Functions) Poller
- WithBackground(backgroundFunctions *BackgroundFunctions) Poller
}
type poller struct {
- ctx context.Context
- name string
- opts *Options
- functions *Functions
- background *BackgroundFunctions
+ ctx context.Context
+ name string
+ opts *Options
+ functions *Functions
}
func New(ctx context.Context, name string) Poller {
@@ -56,13 +54,15 @@ func (p *poller) WithFunctions(functions *Functions) Poller {
return p
}
-func (p *poller) WithBackground(backgroundFunctions *BackgroundFunctions) Poller {
- p.background = backgroundFunctions
- return p
+func (p *poller) preparePoll() {
+ p.opts = p.opts.Ensure()
+ if p.ctx == nil {
+ p.ctx = context.Background()
+ }
}
func (p *poller) Poll() error {
- opts := p.opts.Ensure()
+ p.preparePoll()
start := time.Now()
for {
if util.IsContextDone(p.ctx) {
@@ -73,7 +73,7 @@ func (p *poller) Poll() error {
item, err := p.functions.CallGet(p.ctx)
switch {
- // Object is found - process it
+ // Object is found or getter function is not specified
case err == nil:
if p.functions.CallIsDone(p.ctx, item) {
// All is good, job is done, exit
@@ -83,14 +83,15 @@ func (p *poller) Poll() error {
// Object is found, but processor function says we should continue polling
// exit switch
- // Object is not found - it either failed to be created or just still not created
+ // Object is not found - it either failed to be created or just has not been created yet
case p.functions.CallShouldContinue(p.ctx, item, err):
- if (opts.GetErrorTimeout > 0) && (time.Since(start) >= opts.GetErrorTimeout) {
+ // Error has happened but we should continue
+ if (p.opts.GetErrorTimeout > 0) && (time.Since(start) >= p.opts.GetErrorTimeout) {
// No more wait for the object to be created. Consider create process as failed.
log.V(1).M(p.name).F().Error("Poller.Get() FAILED because item is not available and get timeout reached for: %s. Abort", p.name)
return err
}
- // Error has happened but we should continue
+ // Timeout not reached, we should continue
// exit switch
// Error has happened and we should not continue, abort polling
@@ -102,7 +103,7 @@ func (p *poller) Poll() error {
// Continue polling
// May be time has come to abort polling?
- if time.Since(start) >= opts.Timeout {
+ if time.Since(start) >= p.opts.Timeout {
// Timeout reached, no good result available, time to abort
log.V(1).M(p.name).F().Info("poll(%s) - TIMEOUT reached", p.name)
return fmt.Errorf("poll(%s) - wait timeout", p.name)
@@ -111,44 +112,41 @@ func (p *poller) Poll() error {
// Continue polling
// May be time has come to start bothering with log messages?
- if time.Since(start) >= opts.StartBotheringAfterTimeout {
+ if time.Since(start) >= p.opts.StartBotheringAfterTimeout {
// Start bothering with log messages after some time only
log.V(1).M(p.name).F().Info("WAIT: %s", p.name)
}
// Wait some more time and launch background process(es)
log.V(2).M(p.name).F().Info("poll iteration")
- sleepAndRunBackgroundProcess(p.ctx, opts, p.background)
+ p.sleepAndRunBackgroundProcess()
} // for
}
-func sleepAndRunBackgroundProcess(ctx context.Context, opts *Options, background *BackgroundFunctions) {
- if ctx == nil {
- ctx = context.Background()
- }
+func (p *poller) sleepAndRunBackgroundProcess() {
switch {
- case opts.BackgroundInterval > 0:
- mainIntervalTimeout := time.After(opts.MainInterval)
- backgroundIntervalTimeout := time.After(opts.BackgroundInterval)
+ case p.opts.BackgroundInterval > 0:
+ mainIntervalTimeout := time.After(p.opts.MainInterval)
+ backgroundIntervalTimeout := time.After(p.opts.BackgroundInterval)
for {
select {
- case <-ctx.Done():
+ case <-p.ctx.Done():
// Context is done, nothing to do here more
return
+
case <-mainIntervalTimeout:
// Timeout reached, nothing to do here more
return
+
case <-backgroundIntervalTimeout:
// Function interval reached, time to call the func
- if background != nil {
- if background.F != nil {
- background.F(ctx)
- }
- }
- backgroundIntervalTimeout = time.After(opts.BackgroundInterval)
+ p.functions.CallBackground(p.ctx)
+ // Reload timeout
+ backgroundIntervalTimeout = time.After(p.opts.BackgroundInterval)
+ // continue for loop
}
}
default:
- util.WaitContextDoneOrTimeout(ctx, opts.MainInterval)
+ util.WaitContextDoneOrTimeout(p.ctx, p.opts.MainInterval)
}
}
diff --git a/pkg/controller/common/statefulset/statefulset-reconciler-aux.go b/pkg/controller/common/statefulset/statefulset-reconciler-aux.go
index ba101a9aa..8d06720c0 100644
--- a/pkg/controller/common/statefulset/statefulset-reconciler-aux.go
+++ b/pkg/controller/common/statefulset/statefulset-reconciler-aux.go
@@ -26,8 +26,9 @@ import (
"github.com/altinity/clickhouse-operator/pkg/util"
)
-type IHostStatefulSetPoller interface {
+type IHostObjectsPoller interface {
WaitHostStatefulSetReady(ctx context.Context, host *api.Host) error
+ WaitHostPodStarted(ctx context.Context, host *api.Host) error
}
type fallback interface {
diff --git a/pkg/controller/common/statefulset/statefulset-reconciler-options.go b/pkg/controller/common/statefulset/statefulset-reconciler-options.go
index 8d923ede3..134c46885 100644
--- a/pkg/controller/common/statefulset/statefulset-reconciler-options.go
+++ b/pkg/controller/common/statefulset/statefulset-reconciler-options.go
@@ -15,8 +15,9 @@
package statefulset
type ReconcileOptions struct {
- forceRecreate bool
- doNotWait bool
+ forceRecreate bool
+ waitUntilStarted bool
+ waitUntilReady bool
}
func NewReconcileStatefulSetOptions() *ReconcileOptions {
@@ -30,30 +31,43 @@ func (o *ReconcileOptions) Ensure() *ReconcileOptions {
return o
}
-func (o *ReconcileOptions) SetForceRecreate() *ReconcileOptions {
+func (o *ReconcileOptions) SetWaitUntilStarted() *ReconcileOptions {
o = o.Ensure()
- o.forceRecreate = true
+ o.waitUntilStarted = true
return o
}
-func (o *ReconcileOptions) IsForceRecreate() bool {
+func (o *ReconcileOptions) WaitUntilStarted() bool {
if o == nil {
return false
}
- return o.forceRecreate
+ return o.waitUntilStarted
}
-func (o *ReconcileOptions) SetDoNotWait() *ReconcileOptions {
+func (o *ReconcileOptions) SetWaitUntilReady() *ReconcileOptions {
o = o.Ensure()
- o.doNotWait = true
+ o.waitUntilReady = true
return o
}
-func (o *ReconcileOptions) IsDoNotWait() bool {
+func (o *ReconcileOptions) WaitUntilReady() bool {
if o == nil {
return false
}
- return o.doNotWait
+ return o.waitUntilReady
+}
+
+func (o *ReconcileOptions) SetForceRecreate() *ReconcileOptions {
+ o = o.Ensure()
+ o.forceRecreate = true
+ return o
+}
+
+func (o *ReconcileOptions) ForceRecreate() bool {
+ if o == nil {
+ return false
+ }
+ return o.forceRecreate
}
type ReconcileOptionsSet []*ReconcileOptions
diff --git a/pkg/controller/common/statefulset/statefulset-reconciler.go b/pkg/controller/common/statefulset/statefulset-reconciler.go
index cf2c35dbd..901cd4704 100644
--- a/pkg/controller/common/statefulset/statefulset-reconciler.go
+++ b/pkg/controller/common/statefulset/statefulset-reconciler.go
@@ -36,10 +36,10 @@ type Reconciler struct {
a a.Announcer
task *common.Task
- hostSTSPoller IHostStatefulSetPoller
- namer interfaces.INameManager
- labeler interfaces.ILabeler
- storage *storage.Reconciler
+ hostObjectsPoller IHostObjectsPoller
+ namer interfaces.INameManager
+ labeler interfaces.ILabeler
+ storage *storage.Reconciler
cr interfaces.IKubeCR
sts interfaces.IKubeSTS
@@ -50,7 +50,7 @@ type Reconciler struct {
func NewReconciler(
a a.Announcer,
task *common.Task,
- hostSTSPoller IHostStatefulSetPoller,
+ hostObjectsPoller IHostObjectsPoller,
namer interfaces.INameManager,
labeler interfaces.ILabeler,
storage *storage.Reconciler,
@@ -61,10 +61,10 @@ func NewReconciler(
a: a,
task: task,
- hostSTSPoller: hostSTSPoller,
- namer: namer,
- labeler: labeler,
- storage: storage,
+ hostObjectsPoller: hostObjectsPoller,
+ namer: namer,
+ labeler: labeler,
+ storage: storage,
cr: kube.CR(),
sts: kube.STS(),
@@ -165,7 +165,7 @@ func (r *Reconciler) ReconcileStatefulSet(
}
switch {
- case opts.IsForceRecreate():
+ case opts.ForceRecreate():
// Force recreate prevails over all other requests
_ = r.recreateStatefulSet(ctx, host, register, opts)
default:
@@ -252,7 +252,7 @@ func (r *Reconciler) updateStatefulSet(ctx context.Context, host *api.Host, regi
action := common.ErrCRUDRecreate
if k8s.IsStatefulSetReady(curStatefulSet) {
- action = r.doUpdateStatefulSet(ctx, curStatefulSet, newStatefulSet, host)
+ action = r.doUpdateStatefulSet(ctx, curStatefulSet, newStatefulSet, host, opts)
}
switch action {
@@ -365,16 +365,45 @@ func (r *Reconciler) doCreateStatefulSet(ctx context.Context, host *api.Host, op
return common.ErrCRUDRecreate
}
- if opts.IsDoNotWait() {
- // StatefulSet created, do not wait until host is ready, go by
- log.V(1).M(host).F().Info("Will NOT wait for StatefulSet to be ready, consider it is created successfully")
- } else {
- // StatefulSet created, wait until host is ready
- if err := r.hostSTSPoller.WaitHostStatefulSetReady(ctx, host); err != nil {
- log.V(1).M(host).F().Error("StatefulSet create wait failed. err: %v", err)
- return r.fallback.OnStatefulSetCreateFailed(ctx, host)
+ // StatefulSet created, wait until host is launched
+ if err := r.waitHostStatefulSetToLaunch(ctx, host, opts); err != nil {
+ log.V(1).M(host).F().Error("StatefulSet create wait failed. err: %v", err)
+ return r.fallback.OnStatefulSetCreateFailed(ctx, host)
+ }
+
+ return nil
+}
+
+func (r *Reconciler) waitHostStatefulSetToLaunch(ctx context.Context, host *api.Host, opts *ReconcileOptions) error {
+ // Whether StatefulSet has launched successfully
+ launched := false
+
+ // Start with waiting for startup probe
+ if opts.WaitUntilStarted() {
+ log.V(1).M(host).F().Info("Wait host pod started. Host: %s", host.GetName())
+ if err := r.hostObjectsPoller.WaitHostPodStarted(ctx, host); err != nil {
+ log.V(1).M(host).F().Error("Host pod wait failed. host: %s err: %v", host.GetName(), err)
+ return err
}
- log.V(2).M(host).F().Info("Target generation reached, StatefulSet created successfully")
+ log.V(1).M(host).F().Info("Host pod started. Host: %s", host.GetName())
+ launched = true
+ }
+
+ // Continue with waiting for ready probe
+ if opts.WaitUntilReady() {
+ log.V(1).M(host).F().Info("Wait host sts ready. Host: %s", host.GetName())
+ if err := r.hostObjectsPoller.WaitHostStatefulSetReady(ctx, host); err != nil {
+ log.V(1).M(host).F().Error("Host sts wait failed. host: %s err: %v", host.GetName(), err)
+ return err
+ }
+ log.V(1).M(host).F().Info("Host sts ready. Host: %s", host.GetName())
+ launched = true
+ }
+
+ if launched {
+ log.V(1).M(host).F().Info("Host launched. Host: %s", host.GetName())
+ } else {
+ log.V(1).M(host).F().Warning("Host is not properly launched - no waiting sts at all. Host: %s", host.GetName())
}
return nil
@@ -386,6 +415,7 @@ func (r *Reconciler) doUpdateStatefulSet(
oldStatefulSet *apps.StatefulSet,
newStatefulSet *apps.StatefulSet,
host *api.Host,
+ opts *ReconcileOptions,
) common.ErrorCRUD {
log.V(2).M(host).F().P()
// Apply newStatefulSet and wait for Generation to change
@@ -408,7 +438,8 @@ func (r *Reconciler) doUpdateStatefulSet(
log.V(1).M(host).F().Info("generation change %d=>%d", oldStatefulSet.Generation, updatedStatefulSet.Generation)
- if err := r.hostSTSPoller.WaitHostStatefulSetReady(ctx, host); err != nil {
+ // StatefulSet updated, wait until host is launched
+ if err := r.waitHostStatefulSetToLaunch(ctx, host, opts); err != nil {
log.V(1).M(host).F().Error("StatefulSet update FAILED - wait for ready StatefulSet failed. err: %v", err)
return r.fallback.OnStatefulSetUpdateFailed(ctx, oldStatefulSet, host, r.sts)
}
@@ -450,7 +481,7 @@ func (r *Reconciler) doDeleteStatefulSet(ctx context.Context, host *api.Host) er
}
// Wait until StatefulSet scales down to 0 pods count.
- _ = r.hostSTSPoller.WaitHostStatefulSetReady(ctx, host)
+ _ = r.hostObjectsPoller.WaitHostStatefulSetReady(ctx, host)
// And now delete empty StatefulSet
if err := r.sts.Delete(ctx, namespace, name); err == nil {
diff --git a/pkg/controller/common/task.go b/pkg/controller/common/task.go
index 3252e1ac1..fa275f61b 100644
--- a/pkg/controller/common/task.go
+++ b/pkg/controller/common/task.go
@@ -86,12 +86,12 @@ func (t *Task) WaitForConfigMapPropagation(ctx context.Context, host *api.Host)
// What timeout is expected to be enough for ConfigMap propagation?
// In case timeout is not specified, no need to wait
- if !host.GetCR().GetReconciling().HasConfigMapPropagationTimeout() {
+ if !host.GetCR().GetReconcile().HasConfigMapPropagationTimeout() {
log.V(1).M(host).F().Info("No need to wait for ConfigMap propagation - not applicable due to missing timeout value")
return false
}
- timeout := host.GetCR().GetReconciling().GetConfigMapPropagationTimeoutDuration()
+ timeout := host.GetCR().GetReconcile().GetConfigMapPropagationTimeoutDuration()
// How much time has elapsed since last ConfigMap update?
// May be there is no need to wait already
diff --git a/pkg/controller/common/worker-log.go b/pkg/controller/common/worker-log.go
index 839b771d9..3b45f05d8 100644
--- a/pkg/controller/common/worker-log.go
+++ b/pkg/controller/common/worker-log.go
@@ -24,7 +24,7 @@ import (
// LogCR writes a CR into the log
func LogCR(name string, cr api.ICustomResource) {
- log.V(1).M(cr).Info(
+ log.V(2).M(cr).Info(
"logCR %s start--------------------------------------------:\n%s\nlogCR %s end--------------------------------------------",
name,
name,
@@ -34,7 +34,7 @@ func LogCR(name string, cr api.ICustomResource) {
// LogActionPlan logs action plan
func LogActionPlan(ap *action_plan.ActionPlan) {
- log.Info(
+ log.V(1).Info(
"ActionPlan start---------------------------------------------:\n%s\nActionPlan end---------------------------------------------",
ap,
)
diff --git a/pkg/interfaces/interfaces-kube.go b/pkg/interfaces/interfaces-kube.go
index 85efe7b69..124b55428 100644
--- a/pkg/interfaces/interfaces-kube.go
+++ b/pkg/interfaces/interfaces-kube.go
@@ -41,8 +41,8 @@ type IKube interface {
}
type IKubeConfigMap interface {
- Create(ctx context.Context, cm *core.ConfigMap) (*core.ConfigMap, error)
Get(ctx context.Context, namespace, name string) (*core.ConfigMap, error)
+ Create(ctx context.Context, cm *core.ConfigMap) (*core.ConfigMap, error)
Update(ctx context.Context, cm *core.ConfigMap) (*core.ConfigMap, error)
Delete(ctx context.Context, namespace, name string) error
List(ctx context.Context, namespace string, opts meta.ListOptions) ([]core.ConfigMap, error)
@@ -58,27 +58,27 @@ type IKubeEvent interface {
}
type IKubePDB interface {
- Create(ctx context.Context, pdb *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error)
Get(ctx context.Context, namespace, name string) (*policy.PodDisruptionBudget, error)
+ Create(ctx context.Context, pdb *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error)
Update(ctx context.Context, pdb *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error)
Delete(ctx context.Context, namespace, name string) error
List(ctx context.Context, namespace string, opts meta.ListOptions) ([]policy.PodDisruptionBudget, error)
}
type IKubePod interface {
- Get(params ...any) (*core.Pod, error)
- GetAll(obj any) []*core.Pod
+ Get(ctx context.Context, params ...any) (*core.Pod, error)
+ GetAll(ctx context.Context, obj any) []*core.Pod
Update(ctx context.Context, pod *core.Pod) (*core.Pod, error)
Delete(ctx context.Context, namespace, name string) error
}
type IKubePodEx interface {
- GetRestartCounters(params ...any) (map[string]int, error)
+ GetRestartCounters(ctx context.Context, params ...any) (map[string]int, error)
}
type IKubePVC interface {
- Create(ctx context.Context, pvc *core.PersistentVolumeClaim) (*core.PersistentVolumeClaim, error)
Get(ctx context.Context, namespace, name string) (*core.PersistentVolumeClaim, error)
+ Create(ctx context.Context, pvc *core.PersistentVolumeClaim) (*core.PersistentVolumeClaim, error)
Update(ctx context.Context, pvc *core.PersistentVolumeClaim) (*core.PersistentVolumeClaim, error)
Delete(ctx context.Context, namespace, name string) error
List(ctx context.Context, namespace string, opts meta.ListOptions) ([]core.PersistentVolumeClaim, error)
diff --git a/pkg/interfaces/probe_type.go b/pkg/interfaces/probe_type.go
index 923d9a1b4..3b0bf2d90 100644
--- a/pkg/interfaces/probe_type.go
+++ b/pkg/interfaces/probe_type.go
@@ -17,6 +17,7 @@ package interfaces
type ProbeType string
const (
+ ProbeDefaultStartup ProbeType = "ProbeDefaultStartup"
ProbeDefaultLiveness ProbeType = "ProbeDefaultLiveness"
ProbeDefaultReadiness ProbeType = "ProbeDefaultReadiness"
)
diff --git a/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go b/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go
index c3c0ab373..292f882cf 100644
--- a/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go
+++ b/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go
@@ -38,14 +38,7 @@ const (
toString(value) AS value,
'' AS description,
'gauge' AS type
- FROM system.asynchronous_metrics
- UNION ALL
- SELECT
- concat('metric.', metric) AS metric,
- toString(value) AS value,
- '' AS description,
- 'gauge' AS type
- FROM system.metrics
+ FROM merge('system','^(metrics|asynchronous_metrics|custom_metrics)$')
UNION ALL
SELECT
concat('event.', event) AS metric,
diff --git a/pkg/model/chi/config/generator.go b/pkg/model/chi/config/generator.go
index 3f1365b01..34dcd9b12 100644
--- a/pkg/model/chi/config/generator.go
+++ b/pkg/model/chi/config/generator.go
@@ -165,6 +165,11 @@ func (c *Generator) getHostZookeeper(host *chi.Host) string {
util.Iline(b, 8, "%s", zk.Identity)
}
+ // Append use_compression
+ if zk.UseCompression.IsValid() {
+ util.Iline(b, 8, "%s", zk.UseCompression)
+ }
+
//
util.Iline(b, 4, "")
diff --git a/pkg/model/chi/creator/probe.go b/pkg/model/chi/creator/probe.go
index 782397cc4..c73d4f3bc 100644
--- a/pkg/model/chi/creator/probe.go
+++ b/pkg/model/chi/creator/probe.go
@@ -31,6 +31,8 @@ func NewProbeManager() *ProbeManager {
func (m *ProbeManager) CreateProbe(what interfaces.ProbeType, host *api.Host) *core.Probe {
switch what {
+ case interfaces.ProbeDefaultStartup:
+ return m.createDefaultLivenessProbe(host)
case interfaces.ProbeDefaultLiveness:
return m.createDefaultLivenessProbe(host)
case interfaces.ProbeDefaultReadiness:
diff --git a/pkg/model/chi/namer/patterns.go b/pkg/model/chi/namer/patterns.go
index 63a1d2368..621398ff4 100644
--- a/pkg/model/chi/namer/patterns.go
+++ b/pkg/model/chi/namer/patterns.go
@@ -55,7 +55,7 @@ var patterns = types.List{
const (
// patternPodName is a name of a Pod within StatefulSet. In our setup each StatefulSet has only 1 pod,
// so all pods would have '-0' suffix after StatefulSet name
- // Ex.: StatefulSetName-0
+ // Ex.: <statefulSetName>-0
patternPodName = "%s-0"
)
diff --git a/pkg/model/chi/normalizer/normalizer.go b/pkg/model/chi/normalizer/normalizer.go
index 0af55ce7e..ec4d51709 100644
--- a/pkg/model/chi/normalizer/normalizer.go
+++ b/pkg/model/chi/normalizer/normalizer.go
@@ -109,6 +109,7 @@ func (n *Normalizer) applyExternalCRTemplatesOnTarget(templateRefSrc crTemplates
}
func (n *Normalizer) applyCROnTarget(cr *chi.ClickHouseInstallation) {
+ n.migrateReconcilingBackwardCompatibility(cr)
n.req.GetTarget().MergeFrom(cr, chi.MergeTypeOverrideByNonEmptyValues)
}
@@ -156,7 +157,8 @@ func (n *Normalizer) normalizeSpec() {
n.req.GetTarget().GetSpecT().Troubleshoot = n.normalizeTroubleshoot(n.req.GetTarget().GetSpecT().Troubleshoot)
n.req.GetTarget().GetSpecT().NamespaceDomainPattern = n.normalizeNamespaceDomainPattern(n.req.GetTarget().GetSpecT().NamespaceDomainPattern)
n.req.GetTarget().GetSpecT().Templating = n.normalizeTemplating(n.req.GetTarget().GetSpecT().Templating)
- n.req.GetTarget().GetSpecT().Reconciling = n.normalizeReconciling(n.req.GetTarget().GetSpecT().Reconciling)
+ n.normalizeReconciling()
+ n.req.GetTarget().GetSpecT().Reconcile = n.normalizeReconcile(n.req.GetTarget().GetSpecT().Reconcile)
n.req.GetTarget().GetSpecT().Defaults = n.normalizeDefaults(n.req.GetTarget().GetSpecT().Defaults)
n.normalizeConfiguration()
n.req.GetTarget().GetSpecT().Templates = n.normalizeTemplates(n.req.GetTarget().GetSpecT().Templates)
@@ -356,40 +358,84 @@ func (n *Normalizer) normalizeTemplating(templating *chi.ChiTemplating) *chi.Chi
return templating
}
-// normalizeReconciling normalizes .spec.reconciling
-func (n *Normalizer) normalizeReconciling(reconciling *chi.Reconciling) *chi.Reconciling {
- if reconciling == nil {
- reconciling = chi.NewReconciling().SetDefaults()
+func (n *Normalizer) migrateReconcilingBackwardCompatibility(cr *chi.ClickHouseInstallation) {
+ if cr == nil {
+ return
+ }
+ // Prefer to use Reconciling
+ if cr.Spec.Reconciling != nil {
+ cr.Spec.Reconcile = cr.Spec.Reconciling
+ cr.Spec.Reconciling = nil
+ }
+}
+
+func (n *Normalizer) normalizeReconciling() {
+ // Prefer to use Reconciling
+ if n.req.GetTarget().GetSpecT().Reconciling != nil {
+ n.req.GetTarget().GetSpecT().Reconcile = n.req.GetTarget().GetSpecT().Reconciling
+ n.req.GetTarget().GetSpecT().Reconciling = nil
+ }
+}
+
+// normalizeReconcile normalizes .spec.reconcile
+func (n *Normalizer) normalizeReconcile(reconcile *chi.ChiReconcile) *chi.ChiReconcile {
+ // Ensure reconcile is in place
+ if reconcile == nil {
+ reconcile = chi.NewChiReconcile().SetDefaults()
}
// Policy
- switch strings.ToLower(reconciling.GetPolicy()) {
+ switch strings.ToLower(reconcile.GetPolicy()) {
case strings.ToLower(chi.ReconcilingPolicyWait):
// Known value, overwrite it to ensure case-ness
- reconciling.SetPolicy(chi.ReconcilingPolicyWait)
+ reconcile.SetPolicy(chi.ReconcilingPolicyWait)
case strings.ToLower(chi.ReconcilingPolicyNoWait):
// Known value, overwrite it to ensure case-ness
- reconciling.SetPolicy(chi.ReconcilingPolicyNoWait)
+ reconcile.SetPolicy(chi.ReconcilingPolicyNoWait)
default:
// Unknown value, fallback to default
- reconciling.SetPolicy(chi.ReconcilingPolicyUnspecified)
+ reconcile.SetPolicy(chi.ReconcilingPolicyUnspecified)
}
// ConfigMapPropagationTimeout
// No normalization yet
// Cleanup
- reconciling.SetCleanup(n.normalizeReconcilingCleanup(reconciling.GetCleanup()))
+ reconcile.SetCleanup(n.normalizeReconcileCleanup(reconcile.GetCleanup()))
- // Runtime
- // No normalization yet
// Macros
// No normalization yet
- return reconciling
+ // Runtime
+ // Inherit from chop Config
+ reconcile.InheritRuntimeFrom(chop.Config().Reconcile.Runtime)
+ reconcile.Runtime = n.normalizeReconcileRuntime(reconcile.Runtime)
+
+ // Host
+ // Inherit from chop Config
+ reconcile.InheritHostFrom(chop.Config().Reconcile.Host)
+ reconcile.Host = n.normalizeReconcileHost(reconcile.Host)
+
+ return reconcile
}
-func (n *Normalizer) normalizeReconcilingCleanup(cleanup *chi.Cleanup) *chi.Cleanup {
+func (n *Normalizer) normalizeReconcileRuntime(runtime chi.ReconcileRuntime) chi.ReconcileRuntime {
+ if runtime.ReconcileShardsThreadsNumber == 0 {
+ runtime.ReconcileShardsThreadsNumber = defaultReconcileShardsThreadsNumber
+ }
+ if runtime.ReconcileShardsMaxConcurrencyPercent == 0 {
+ runtime.ReconcileShardsMaxConcurrencyPercent = defaultReconcileShardsMaxConcurrencyPercent
+ }
+ return runtime
+}
+
+func (n *Normalizer) normalizeReconcileHost(rh chi.ReconcileHost) chi.ReconcileHost {
+ // Normalize
+ rh = rh.Normalize()
+ return rh
+}
+
+func (n *Normalizer) normalizeReconcileCleanup(cleanup *chi.Cleanup) *chi.Cleanup {
if cleanup == nil {
cleanup = chi.NewCleanup()
}
@@ -620,7 +666,7 @@ func (n *Normalizer) replacers(section replacerSection, scope any, additional ..
// Should scope macros be applied - depends on whether macros are enabled in the section
shouldApplyScopeMacros := false
// Shortcut to macros enabled/disabled toggles
- sectionToggles := n.req.GetTarget().Spec.Reconciling.Macros.Sections
+ sectionToggles := n.req.GetTarget().Spec.Reconcile.Macros.Sections
switch section {
case replacerFiles:
@@ -784,7 +830,7 @@ func (n *Normalizer) normalizeClusterStage2(cluster *chi.Cluster) *chi.Cluster {
// Inherit from .spec.configuration.files
cluster.InheritFilesFrom(n.req.GetTarget())
// Inherit from .spec.reconciling
- cluster.InheritReconcileFrom(n.req.GetTarget())
+ cluster.InheritClusterReconcileFrom(n.req.GetTarget())
// Inherit from .spec.defaults
cluster.InheritTemplatesFrom(n.req.GetTarget())
@@ -949,25 +995,10 @@ func (n *Normalizer) normalizeClusterLayoutShardsCountAndReplicasCount(clusterLa
func (n *Normalizer) normalizeClusterReconcile(reconcile chi.ClusterReconcile) chi.ClusterReconcile {
reconcile.Runtime = n.normalizeReconcileRuntime(reconcile.Runtime)
+ reconcile.Host = n.normalizeReconcileHost(reconcile.Host)
return reconcile
}
-func (n *Normalizer) normalizeReconcileRuntime(runtime chi.ReconcileRuntime) chi.ReconcileRuntime {
- if runtime.ReconcileShardsThreadsNumber == 0 {
- runtime.ReconcileShardsThreadsNumber = chop.Config().Reconcile.Runtime.ReconcileShardsThreadsNumber
- }
- if runtime.ReconcileShardsThreadsNumber == 0 {
- runtime.ReconcileShardsThreadsNumber = defaultReconcileShardsThreadsNumber
- }
- if runtime.ReconcileShardsMaxConcurrencyPercent == 0 {
- runtime.ReconcileShardsMaxConcurrencyPercent = chop.Config().Reconcile.Runtime.ReconcileShardsMaxConcurrencyPercent
- }
- if runtime.ReconcileShardsMaxConcurrencyPercent == 0 {
- runtime.ReconcileShardsMaxConcurrencyPercent = defaultReconcileShardsMaxConcurrencyPercent
- }
- return runtime
-}
-
// ensureClusterLayoutShards ensures slice layout.Shards is in place
func (n *Normalizer) ensureClusterLayoutShards(layout *chi.ChiClusterLayout) {
// Disposition of shards in slice would be
diff --git a/pkg/model/chk/creator/probe.go b/pkg/model/chk/creator/probe.go
index 3b63cee5b..85f8dba5c 100644
--- a/pkg/model/chk/creator/probe.go
+++ b/pkg/model/chk/creator/probe.go
@@ -33,6 +33,8 @@ func NewProbeManager() *ProbeManager {
func (m *ProbeManager) CreateProbe(what interfaces.ProbeType, host *api.Host) *core.Probe {
switch what {
+ case interfaces.ProbeDefaultStartup:
+ return nil
case interfaces.ProbeDefaultLiveness:
return m.createDefaultLivenessProbe(host)
case interfaces.ProbeDefaultReadiness:
diff --git a/pkg/model/chk/normalizer/normalizer.go b/pkg/model/chk/normalizer/normalizer.go
index 724f42426..881b060d3 100644
--- a/pkg/model/chk/normalizer/normalizer.go
+++ b/pkg/model/chk/normalizer/normalizer.go
@@ -101,6 +101,7 @@ func (n *Normalizer) applyExternalCRTemplatesOnTarget(templateRefSrc crTemplates
}
func (n *Normalizer) applyCROnTarget(cr *chk.ClickHouseKeeperInstallation) {
+ n.migrateReconcilingBackwardCompatibility(cr)
n.req.GetTarget().MergeFrom(cr, chi.MergeTypeOverrideByNonEmptyValues)
}
@@ -143,7 +144,8 @@ func (n *Normalizer) normalizeSpec() {
// Walk over Spec datatype fields
n.req.GetTarget().GetSpecT().TaskID = n.normalizeTaskID(n.req.GetTarget().GetSpecT().TaskID)
n.req.GetTarget().GetSpecT().NamespaceDomainPattern = n.normalizeNamespaceDomainPattern(n.req.GetTarget().GetSpecT().NamespaceDomainPattern)
- n.req.GetTarget().GetSpecT().Reconciling = n.normalizeReconciling(n.req.GetTarget().GetSpecT().Reconciling)
+ n.normalizeReconciling()
+ n.req.GetTarget().GetSpecT().Reconcile = n.normalizeReconcile(n.req.GetTarget().GetSpecT().Reconcile)
n.req.GetTarget().GetSpecT().Defaults = n.normalizeDefaults(n.req.GetTarget().GetSpecT().Defaults)
n.normalizeConfiguration()
n.req.GetTarget().GetSpecT().Templates = n.normalizeTemplates(n.req.GetTarget().GetSpecT().Templates)
@@ -286,40 +288,63 @@ func (n *Normalizer) normalizeTemplates(templates *chi.Templates) *chi.Templates
return templates
}
-// normalizeReconciling normalizes .spec.reconciling
-func (n *Normalizer) normalizeReconciling(reconciling *chi.Reconciling) *chi.Reconciling {
- if reconciling == nil {
- reconciling = chi.NewReconciling().SetDefaults()
+func (n *Normalizer) migrateReconcilingBackwardCompatibility(cr *chk.ClickHouseKeeperInstallation) {
+ if cr == nil {
+ return
+ }
+ // Prefer to use Reconciling
+ if cr.Spec.Reconciling != nil {
+ cr.Spec.Reconcile = cr.Spec.Reconciling
+ cr.Spec.Reconciling = nil
+ }
+}
+
+func (n *Normalizer) normalizeReconciling() {
+ // Prefer to use Reconciling
+ if n.req.GetTarget().GetSpecT().Reconciling != nil {
+ n.req.GetTarget().GetSpecT().Reconcile = n.req.GetTarget().GetSpecT().Reconciling
+ n.req.GetTarget().GetSpecT().Reconciling = nil
+ }
+}
+
+// normalizeReconcile normalizes .spec.reconcile
+func (n *Normalizer) normalizeReconcile(reconcile *chi.ChiReconcile) *chi.ChiReconcile {
+ // Ensure reconcile is in place
+ if reconcile == nil {
+ reconcile = chi.NewChiReconcile().SetDefaults()
}
// Policy
- switch strings.ToLower(reconciling.GetPolicy()) {
+ switch strings.ToLower(reconcile.GetPolicy()) {
case strings.ToLower(chi.ReconcilingPolicyWait):
// Known value, overwrite it to ensure case-ness
- reconciling.SetPolicy(chi.ReconcilingPolicyWait)
+ reconcile.SetPolicy(chi.ReconcilingPolicyWait)
case strings.ToLower(chi.ReconcilingPolicyNoWait):
// Known value, overwrite it to ensure case-ness
- reconciling.SetPolicy(chi.ReconcilingPolicyNoWait)
+ reconcile.SetPolicy(chi.ReconcilingPolicyNoWait)
default:
// Unknown value, fallback to default
- reconciling.SetPolicy(chi.ReconcilingPolicyUnspecified)
+ reconcile.SetPolicy(chi.ReconcilingPolicyUnspecified)
}
// ConfigMapPropagationTimeout
// No normalization yet
// Cleanup
- reconciling.SetCleanup(n.normalizeReconcilingCleanup(reconciling.GetCleanup()))
+ reconcile.SetCleanup(n.normalizeReconcileCleanup(reconcile.GetCleanup()))
- // Runtime
- // No normalization yet
// Macros
// No normalization yet
- return reconciling
+ // Runtime
+ // No normalization yet
+
+ // Host
+ // No normalization yet
+ return reconcile
}
-func (n *Normalizer) normalizeReconcilingCleanup(cleanup *chi.Cleanup) *chi.Cleanup {
+func (n *Normalizer) normalizeReconcileCleanup(cleanup *chi.Cleanup) *chi.Cleanup {
if cleanup == nil {
cleanup = chi.NewCleanup()
}
diff --git a/pkg/model/clickhouse/cluster.go b/pkg/model/clickhouse/cluster.go
index f67057818..88789927e 100644
--- a/pkg/model/clickhouse/cluster.go
+++ b/pkg/model/clickhouse/cluster.go
@@ -167,6 +167,12 @@ func (c *Cluster) exec(ctx context.Context, host string, queries []string, _opts
sqlAttach := strings.ReplaceAll(sql, "CREATE TABLE", "ATTACH TABLE")
err = conn.Exec(ctx, sqlAttach, opts)
}
+ if err != nil && strings.Contains(err.Error(), "Code: 117") && strings.Contains(sql, "CREATE TABLE") {
+ // WARNING: error message or code may change in newer ClickHouse versions
+ c.l.V(1).M(host).F().Info("Directory for table already exists. Trying ATTACH TABLE instead")
+ sqlAttach := strings.ReplaceAll(sql, "CREATE TABLE", "ATTACH TABLE")
+ err = conn.Exec(ctx, sqlAttach, opts)
+ }
if err == nil || strings.Contains(err.Error(), "ALREADY_EXISTS") {
queries[i] = "" // Query is executed or object already exists, removing from the list
} else {
diff --git a/pkg/model/common/creator/stateful-set-application.go b/pkg/model/common/creator/stateful-set-application.go
index 82f41a03d..03cd02913 100644
--- a/pkg/model/common/creator/stateful-set-application.go
+++ b/pkg/model/common/creator/stateful-set-application.go
@@ -104,6 +104,11 @@ func (c *Creator) stsEnsureAppContainerProbesSpecified(statefulSet *apps.Statefu
return
}
+ if container.StartupProbe == nil {
+ if host.GetCluster().GetReconcile().Host.Wait.Probes.GetStartup().IsTrue() {
+ container.StartupProbe = c.pm.CreateProbe(interfaces.ProbeDefaultStartup, host)
+ }
+ }
if container.LivenessProbe == nil {
container.LivenessProbe = c.pm.CreateProbe(interfaces.ProbeDefaultLiveness, host)
}
diff --git a/pkg/model/common/normalizer/subst/settings.go b/pkg/model/common/normalizer/subst/settings.go
index fc2f90898..91edcac1d 100644
--- a/pkg/model/common/normalizer/subst/settings.go
+++ b/pkg/model/common/normalizer/subst/settings.go
@@ -67,12 +67,17 @@ func substSettingsFieldWithDataFromDataSource(
}
// Create setting from the secret with a provided function
- if newSetting, err := newSettingCreator(secretAddress); err == nil {
- // Set the new setting as dst.
- // Replacing src in case src name is the same as dst name.
- settings.Set(dstField, newSetting)
+ newSetting, err := newSettingCreator(secretAddress)
+ if err != nil {
+ // Unable to create new setting
+ // No substitution done
+ return false
}
+ // Set the new setting as dst.
+ // Replacing src in case src name is the same as dst name.
+ settings.Set(dstField, newSetting)
+
// In case we are NOT replacing the same field with its new value, then remove the source field.
// Typically non-replaced source field is not expected to be included into the final config,
// mainly because very often these source fields are synthetic ones (do not exist in config fields list).
@@ -92,7 +97,12 @@ func ReplaceSettingsFieldWithSecretFieldValue(
srcSecretRefField string,
secretGet SecretGetter,
) bool {
- return substSettingsFieldWithDataFromDataSource(settings, req.GetTargetNamespace(), dstField, srcSecretRefField, true,
+ return substSettingsFieldWithDataFromDataSource(
+ settings,
+ req.GetTargetNamespace(),
+ dstField,
+ srcSecretRefField,
+ true,
func(secretAddress types.ObjectAddress) (*api.Setting, error) {
secretFieldValue, err := fetchSecretFieldValue(secretAddress, secretGet)
if err != nil {
@@ -100,7 +110,8 @@ func ReplaceSettingsFieldWithSecretFieldValue(
}
// Create new setting with the value
return api.NewSettingScalar(secretFieldValue), nil
- })
+ },
+ )
}
// ReplaceSettingsFieldWithEnvRefToSecretField substitute users settings field with ref to ENV var where value from k8s secret is stored in
@@ -112,11 +123,20 @@ func ReplaceSettingsFieldWithEnvRefToSecretField(
envVarNamePrefix string,
parseScalarString bool,
) bool {
- return substSettingsFieldWithDataFromDataSource(settings, req.GetTargetNamespace(), dstField, srcSecretRefField, parseScalarString,
+ return substSettingsFieldWithDataFromDataSource(
+ settings,
+ req.GetTargetNamespace(),
+ dstField,
+ srcSecretRefField,
+ parseScalarString,
func(secretAddress types.ObjectAddress) (*api.Setting, error) {
// ENV VAR name and value
// In case not OK env var name will be empty and config will be incorrect. CH may not start
- envVarName, _ := util.BuildShellEnvVarName(envVarNamePrefix + "_" + settings.Name2Key(dstField))
+ envVarName, ok := util.BuildShellEnvVarName(envVarNamePrefix + "_" + settings.Name2Key(dstField))
+ if !ok {
+ return nil, fmt.Errorf("unable to build shell env var name for dstField: %s", dstField)
+ }
+
req.AppendAdditionalEnvVar(
core.EnvVar{
Name: envVarName,
@@ -130,6 +150,7 @@ func ReplaceSettingsFieldWithEnvRefToSecretField(
},
},
)
+
// Create new setting w/o value but with attribute to read from ENV var
return api.NewSettingScalar("").SetAttribute("from_env", envVarName), nil
})
diff --git a/pkg/model/k8s/container.go b/pkg/model/k8s/container.go
index 4e9771ff2..a139e0f28 100644
--- a/pkg/model/k8s/container.go
+++ b/pkg/model/k8s/container.go
@@ -26,6 +26,14 @@ func PodSpecAddContainer(podSpec *core.PodSpec, container core.Container) {
podSpec.Containers = append(podSpec.Containers, container)
}
+func ContainerWalkVolumeMounts(container *core.Container, f func(volumeMount *core.VolumeMount)) {
+ for i := range container.VolumeMounts {
+ // Convenience wrapper
+ volumeMount := &container.VolumeMounts[i]
+ f(volumeMount)
+ }
+}
+
// ContainerAppendVolumeMounts appends multiple VolumeMount(s) to the specified container
func ContainerAppendVolumeMounts(container *core.Container, volumeMounts ...core.VolumeMount) {
for _, volumeMount := range volumeMounts {
@@ -48,10 +56,7 @@ func VolumeMountIsValid(volumeMount core.VolumeMount) bool {
// ContainerAppendVolumeMount appends one VolumeMount to the specified container
func ContainerAppendVolumeMount(container *core.Container, volumeMount core.VolumeMount) {
- //
// Sanity checks
- //
-
if container == nil {
return
}
@@ -61,29 +66,33 @@ func ContainerAppendVolumeMount(container *core.Container, volumeMount core.Volu
return
}
+ unableToAppend := false
+
// Check that:
- // 1. Mountable item (VolumeClaimTemplate or Volume) specified in this VolumeMount is NOT already mounted
+ // 1. Mountable item (VolumeClaimTemplate or Volume) specified in VolumeMount to add is NOT already mounted
// in this container by any other VolumeMount (to avoid double-mount of a mountable item)
// 2. And specified `mountPath` (say '/var/lib/clickhouse') is NOT already mounted in this container
// by any VolumeMount (to avoid double-mount/rewrite into single `mountPath`)
- for i := range container.VolumeMounts {
- // Convenience wrapper
- existingVolumeMount := &container.VolumeMounts[i]
-
+ ContainerWalkVolumeMounts(container, func(existingVolumeMount *core.VolumeMount) {
// 1. Check whether this mountable item is already listed in VolumeMount of this container
if volumeMount.Name == existingVolumeMount.Name {
// This .templates.VolumeClaimTemplate is already used in VolumeMount
- return
+ unableToAppend = true
}
// 2. Check whether `mountPath` (say '/var/lib/clickhouse') is already mounted
if volumeMount.MountPath == existingVolumeMount.MountPath {
- // `mountPath` (say /var/lib/clickhouse) is already mounted
- return
+ // `mountPath` (say /var/lib/clickhouse) is already used as mount point
+ unableToAppend = true
}
+ })
+
+ if unableToAppend {
+ // VolumeMount is not good to be added into container
+ return
}
- // Add VolumeMount to ClickHouse container to `mountPath` point
+ // Add VolumeMount into container to `mountPath` point
container.VolumeMounts = append(container.VolumeMounts, volumeMount)
}
diff --git a/pkg/model/k8s/pod.go b/pkg/model/k8s/pod.go
index 768623da8..3549866f0 100644
--- a/pkg/model/k8s/pod.go
+++ b/pkg/model/k8s/pod.go
@@ -18,7 +18,8 @@ import (
core "k8s.io/api/core/v1"
)
-func PodRestartCountersGet(pod *core.Pod) map[string]int {
+// Maps container name => restarts count
+func PodContainersRestartCountsGet(pod *core.Pod) map[string]int {
if pod == nil {
return nil
}
@@ -58,16 +59,24 @@ func PodHasNotReadyContainers(pod *core.Pod) bool {
}
func PodHasAllContainersStarted(pod *core.Pod) bool {
- allStarted := true
+ allContainersStarted := true
for _, containerStatus := range pod.Status.ContainerStatuses {
+ // Started indicates whether the container has finished its postStart lifecycle hook
+ // and passed its startup probe.
+ // Initialized as false, becomes true after startupProbe is considered
+ // successful. Resets to false when the container is restarted, or if kubelet
+ // loses state temporarily. In both cases, startup probes will run again.
+ // Is always true when no startupProbe is defined and container is running and
+ // has passed the postStart lifecycle hook. The null value must be treated the
+ // same as false.
if (containerStatus.Started != nil) && (*containerStatus.Started) {
// Current container is started. no changes in all status
} else {
// Current container is NOT started
- allStarted = false
+ allContainersStarted = false
}
}
- return allStarted
+ return allContainersStarted
}
func PodHasNotStartedContainers(pod *core.Pod) bool {
diff --git a/pkg/model/k8s/stateful_set.go b/pkg/model/k8s/stateful_set.go
index c21f09839..9fcfc9b2e 100644
--- a/pkg/model/k8s/stateful_set.go
+++ b/pkg/model/k8s/stateful_set.go
@@ -49,16 +49,14 @@ func StatefulSetContainerGet(statefulSet *apps.StatefulSet, namesOrIndexes ...an
return nil, false
}
-// IsStatefulSetGeneration returns whether StatefulSet has requested generation or not
-func IsStatefulSetGeneration(statefulSet *apps.StatefulSet, generation int64) bool {
+// IsStatefulSetReconcileCompleted returns whether StatefulSet reconcile completed
+func IsStatefulSetReconcileCompleted(statefulSet *apps.StatefulSet) bool {
if statefulSet == nil {
return false
}
- // StatefulSet has .spec generation we are looking for
- return (statefulSet.Generation == generation) &&
- // and this .spec generation is being applied to replicas - it is observed right now
- (statefulSet.Status.ObservedGeneration == statefulSet.Generation) &&
+ // the .spec generation is being applied to replicas - it is observed right now
+ return (statefulSet.Status.ObservedGeneration == statefulSet.Generation) &&
// and all replicas are of expected generation
(statefulSet.Status.CurrentReplicas == *statefulSet.Spec.Replicas) &&
// and all replicas are updated - meaning rolling update completed over all replicas
@@ -134,14 +132,12 @@ func StatefulSetAppendPersistentVolumeClaims(statefulSet *apps.StatefulSet, pvcs
func StatefulSetAppendVolumeMountsInAllContainers(statefulSet *apps.StatefulSet, volumeMounts ...core.VolumeMount) {
// And reference these Volumes in each Container via VolumeMount
// So Pod will have VolumeMounts mounted as Volumes
- for i := range statefulSet.Spec.Template.Spec.Containers {
- // Convenience wrapper
- container := &statefulSet.Spec.Template.Spec.Containers[i]
+ StatefulSetWalkContainers(statefulSet, func(container *core.Container) {
ContainerAppendVolumeMounts(
container,
volumeMounts...,
)
- }
+ })
}
func StatefulSetWalkContainers(statefulSet *apps.StatefulSet, f func(*core.Container)) {
@@ -154,10 +150,6 @@ func StatefulSetWalkContainers(statefulSet *apps.StatefulSet, f func(*core.Conta
func StatefulSetWalkVolumeMounts(statefulSet *apps.StatefulSet, f func(*core.VolumeMount)) {
StatefulSetWalkContainers(statefulSet, func(container *core.Container) {
- for j := range container.VolumeMounts {
- // Convenience wrapper
- volumeMount := &container.VolumeMounts[j]
- f(volumeMount)
- }
+ ContainerWalkVolumeMounts(container, f)
})
}
diff --git a/pkg/model/k8s/volume.go b/pkg/model/k8s/volume.go
index aac99a494..0ad2ee2bd 100644
--- a/pkg/model/k8s/volume.go
+++ b/pkg/model/k8s/volume.go
@@ -17,12 +17,12 @@ package k8s
import core "k8s.io/api/core/v1"
// CreateVolumeForPVC returns core.Volume object with specified name
-func CreateVolumeForPVC(name, claimName string) core.Volume {
+func CreateVolumeForPVC(volumeName, pvcName string) core.Volume {
return core.Volume{
- Name: name,
+ Name: volumeName,
VolumeSource: core.VolumeSource{
PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{
- ClaimName: claimName,
+ ClaimName: pvcName,
ReadOnly: false,
},
},
@@ -30,14 +30,14 @@ func CreateVolumeForPVC(name, claimName string) core.Volume {
}
// CreateVolumeForConfigMap returns core.Volume object with defined name
-func CreateVolumeForConfigMap(name string) core.Volume {
+func CreateVolumeForConfigMap(volumeName string) core.Volume {
var defaultMode int32 = 0644
return core.Volume{
- Name: name,
+ Name: volumeName,
VolumeSource: core.VolumeSource{
ConfigMap: &core.ConfigMapVolumeSource{
LocalObjectReference: core.LocalObjectReference{
- Name: name,
+ Name: volumeName,
},
DefaultMode: &defaultMode,
},
diff --git a/pkg/util/array.go b/pkg/util/array.go
index e12abd0af..931f9092e 100644
--- a/pkg/util/array.go
+++ b/pkg/util/array.go
@@ -31,11 +31,11 @@ func InArray(needle string, haystack []string) bool {
return false
}
-// InArrayWithRegexp checks whether the needle can be matched by haystack
-func InArrayWithRegexp(needle string, haystack []string) bool {
- for _, item := range haystack {
- matched, _ := regexp.MatchString(item, needle)
- if item == needle || matched {
+// MatchArrayOfRegexps checks whether the needle can be matched by haystack
+func MatchArrayOfRegexps(needle string, haystack []string) bool {
+ for _, pattern := range haystack {
+ matched, _ := regexp.MatchString(pattern, needle)
+ if pattern == needle || matched {
return true
}
}
diff --git a/pkg/util/shell.go b/pkg/util/shell.go
index 24c5b1b8a..c25ab488a 100644
--- a/pkg/util/shell.go
+++ b/pkg/util/shell.go
@@ -15,39 +15,61 @@
package util
import (
+ "crypto/md5"
+ "encoding/hex"
"regexp"
"strings"
)
-const shellEnvVarNameMaxLength int = 63
+const shellEnvVarNameBaseMaxLength int = 63
+const shellEnvVarNameFullMaxLength int = 127
var shellEnvVarNameRegexp = regexp.MustCompile("^[A-Z]([_A-Z0-9]*[A-Z0-9])?$")
var shellEnvVarNameStartRegexp = regexp.MustCompile("^[A-Z]")
var shellEnvVarNameNotAllowedCharsRegexp = regexp.MustCompile("[^_A-Z0-9]")
var shellEnvVarNameReplaceCharsRegexp = regexp.MustCompile("[/]")
-func BuildShellEnvVarName(str string) (string, bool) {
+func BuildShellEnvVarName(str string) (name string, ok bool) {
+ // Do not touch original value
+ name = str
+
// Must be uppercase
- str = strings.ToUpper(str)
+ name = strings.ToUpper(name)
+
// First char must comply to start regexp
- for len(str) > 0 {
- if shellEnvVarNameStartRegexp.MatchString(str) {
+ // Cut the first char until it is reasonable
+ for len(name) > 0 {
+ if shellEnvVarNameStartRegexp.MatchString(name) {
break
} else {
- str = str[1:]
+ name = name[1:]
}
}
+
// Replace replaceable chars
- str = shellEnvVarNameReplaceCharsRegexp.ReplaceAllString(str, "_")
+ name = shellEnvVarNameReplaceCharsRegexp.ReplaceAllString(name, "_")
// Remove not allowed chars
- str = shellEnvVarNameNotAllowedCharsRegexp.ReplaceAllString(str, "")
+ name = shellEnvVarNameNotAllowedCharsRegexp.ReplaceAllString(name, "")
+
// Must have limited length
- if len(str) > shellEnvVarNameMaxLength {
- str = str[0:shellEnvVarNameMaxLength]
+ suffix := ""
+ if len(name) > shellEnvVarNameBaseMaxLength {
+ // Cut the name
+ name = name[0:shellEnvVarNameBaseMaxLength]
+ // Prepare fixed length suffix out of original string
+ hash := md5.Sum([]byte(str))
+ suffix = "_" + strings.ToUpper(hex.EncodeToString(hash[:]))
}
- if IsShellEnvVarName(str) {
- return str, true
+ // Ensure no trailing underscores
+ name = strings.TrimRight(name, "_")
+
+ // Append suffix to keep name uniqueness
+ name += suffix
+
+ // It still has to be a valid env name after all
+ if IsShellEnvVarName(name) {
+ return name, true
}
return "", false
@@ -55,7 +77,7 @@ func BuildShellEnvVarName(str string) (string, bool) {
// IsShellEnvVarName tests for a string that conforms to the definition of a shell ENV VAR name
func IsShellEnvVarName(value string) bool {
- if len(value) > shellEnvVarNameMaxLength {
+ if len(value) > shellEnvVarNameFullMaxLength {
return false
}
if !shellEnvVarNameRegexp.MatchString(value) {
diff --git a/release b/release
index 3d9dcb1ba..35aa2f3c0 100644
--- a/release
+++ b/release
@@ -1 +1 @@
-0.25.3
+0.25.4
diff --git a/releases b/releases
index 578405931..9564c9b5e 100644
--- a/releases
+++ b/releases
@@ -1,3 +1,4 @@
+0.25.3
0.25.2
0.25.1
0.25.0
diff --git a/tests/e2e/kubectl.py b/tests/e2e/kubectl.py
index 9fd655257..29bbc381a 100644
--- a/tests/e2e/kubectl.py
+++ b/tests/e2e/kubectl.py
@@ -410,9 +410,11 @@ def wait_pod_status(pod, status, shell=None, ns=None):
def get_pod_status(pod, shell=None, ns=None):
return get_field("pod", pod, ".status.phase", ns, shell=shell)
+def wait_container_status(pod, status, shell=None, ns=None):
+ wait_field("pod", pod, ".status.containerStatuses[0].ready", status, ns, shell=shell)
-def wait_container_status(pod, status, ns=None):
- wait_field("pod", pod, ".status.containerStatuses[0].ready", status, ns)
+def get_container_status(pod, shell=None, ns=None):
+ return get_field("pod", pod, ".status.containerStatuses[0]", ns, shell=shell)
def wait_field(
@@ -512,6 +514,14 @@ def get_pod_spec(chi_name, pod_name="", ns=None, shell=None):
pod = get("pod", pod_name, ns=ns, shell=shell)
return pod["spec"]
+def get_pod_status_full(chi_name, pod_name="", ns=None, shell=None):
+ label = f"-l clickhouse.altinity.com/chi={chi_name}"
+ if pod_name == "":
+ pod = get("pod", "", ns=ns, label=label, shell=shell)["items"][0]
+ else:
+ pod = get("pod", pod_name, ns=ns, shell=shell)
+ return pod["status"]
+
def get_clickhouse_start(chi_name, ns=None, shell=None):
pod_name = get_pod_names(chi_name, ns=ns, shell=shell)[0]
diff --git a/tests/e2e/manifests/chi/test-011-secrets.yaml b/tests/e2e/manifests/chi/test-011-secrets.yaml
index 234a5c516..2d4bf9363 100644
--- a/tests/e2e/manifests/chi/test-011-secrets.yaml
+++ b/tests/e2e/manifests/chi/test-011-secrets.yaml
@@ -29,8 +29,18 @@ spec:
secretKeyRef:
name: test-011-secret
key: pwduser5
-
settings:
+ custom_settings_prefixes: custom_
+ profiles/default/custom_1234567890_1234567890_1234567890_1234567890_0:
+ valueFrom:
+ secretKeyRef:
+ name: test-011-secret
+ key: custom0
+ profiles/default/custom_1234567890_1234567890_1234567890_1234567890_1:
+ valueFrom:
+ secretKeyRef:
+ name: test-011-secret
+ key: custom1
kafka/sasl_username:
valueFrom:
secretKeyRef:
diff --git a/tests/e2e/manifests/chi/test-011-secured-cluster-1.yaml b/tests/e2e/manifests/chi/test-011-secured-cluster-1.yaml
index 9dc7cc489..a67e94e34 100644
--- a/tests/e2e/manifests/chi/test-011-secured-cluster-1.yaml
+++ b/tests/e2e/manifests/chi/test-011-secured-cluster-1.yaml
@@ -37,7 +37,7 @@ spec:
user4/access_management: 1
# clickhouse_operator password should be accepted and encrypted
- clickhuse_operator/password: operator_secret
+ clickhouse_operator/password: operator_secret
clusters:
- name: default
layout:
diff --git a/tests/e2e/manifests/chi/test-011-secured-cluster-2.yaml b/tests/e2e/manifests/chi/test-011-secured-cluster-2.yaml
index 30b287efc..15d3ed02a 100644
--- a/tests/e2e/manifests/chi/test-011-secured-cluster-2.yaml
+++ b/tests/e2e/manifests/chi/test-011-secured-cluster-2.yaml
@@ -37,7 +37,7 @@ spec:
user4/access_management: 1
# clickhouse_operator password should be accepted and encrypted
- clickhuse_operator/password: operator_secret
+ clickhouse_operator/password: operator_secret
clusters:
- name: default
layout:
diff --git a/tests/e2e/manifests/chi/test-040-startup-probe.yaml b/tests/e2e/manifests/chi/test-040-startup-probe.yaml
new file mode 100644
index 000000000..6cdcb6b0d
--- /dev/null
+++ b/tests/e2e/manifests/chi/test-040-startup-probe.yaml
@@ -0,0 +1,50 @@
+apiVersion: "clickhouse.altinity.com/v1"
+
+kind: "ClickHouseInstallation"
+
+metadata:
+ name: test-040-startup-probe
+
+spec:
+ reconcile:
+ host:
+ wait:
+ probes:
+ startup: "yes"
+ readiness: "no"
+ defaults:
+ templates:
+ podTemplate: not-ready-replica
+ configuration:
+ clusters:
+ - name: default
+ templates:
+ podTemplates:
+ - name: not-ready-replica
+ spec:
+ containers:
+ - name: clickhouse-pod
+ image: clickhouse/clickhouse-server:24.8
+ # startupProbe:
+ # httpGet:
+ # path: /ping
+ # port: 8123
+ # scheme: HTTP
+ # initialDelaySeconds: 10
+ # periodSeconds: 3
+ # successThreshold: 1
+ # failureThreshold: 10
+ readinessProbe:
+ httpGet:
+ path: "/?query=select%20throwIf(uptime()<120)"
+ port: 8123
+ scheme: HTTP
+ httpHeaders:
+ - name: X-ClickHouse-User
+ value: probe
+ - name: X-ClickHouse-Key
+ value: probe
+ initialDelaySeconds: 5
+ timeoutSeconds: 5
+ failureThreshold: 5
+
diff --git a/tests/e2e/manifests/chit/test-023-auto-templates-4.yaml b/tests/e2e/manifests/chit/test-023-auto-templates-4.yaml
new file mode 100644
index 000000000..c9ae2e7cc
--- /dev/null
+++ b/tests/e2e/manifests/chit/test-023-auto-templates-4.yaml
@@ -0,0 +1,12 @@
+# test_023
+
+apiVersion: "clickhouse.altinity.com/v1"
+kind: "ClickHouseInstallationTemplate"
+
+metadata:
+ name: set-labels
+ labels:
+ my-label: test
+spec:
+ templating:
+ policy: auto
diff --git a/tests/e2e/manifests/secret/test-011-secret.yaml b/tests/e2e/manifests/secret/test-011-secret.yaml
index 6b2c0dba1..87018de37 100644
--- a/tests/e2e/manifests/secret/test-011-secret.yaml
+++ b/tests/e2e/manifests/secret/test-011-secret.yaml
@@ -11,4 +11,7 @@ stringData:
pwduser5: pwduser5
KAFKA_SASL_USERNAME: test_secret
- KAFKA_SASL_PASSWORD: test_secret
\ No newline at end of file
+ KAFKA_SASL_PASSWORD: test_secret
+
+ custom0: custom0
+ custom1: custom1
\ No newline at end of file
diff --git a/tests/e2e/steps.py b/tests/e2e/steps.py
index f0865fd1c..22881fa1d 100644
--- a/tests/e2e/steps.py
+++ b/tests/e2e/steps.py
@@ -158,8 +158,7 @@ def check_metrics_monitoring(
port="8888",
max_retries=7
):
- with Then(f"metrics-exporter /metrics endpoint result should contain"):
- print(f"expect: '{expect_pattern}', '{expect_metric}', '{expect_labels}'")
+ with Then(f"metrics-exporter /metrics endpoint result should contain {expect_pattern}{expect_metric}"):
expected_pattern_found = False
for i in range(1, max_retries):
url_cmd = util.make_http_get_request("127.0.0.1", port, "/metrics")
diff --git a/tests/e2e/test_metrics_exporter.py b/tests/e2e/test_metrics_exporter.py
index 4e6f4c52c..923abd08a 100644
--- a/tests/e2e/test_metrics_exporter.py
+++ b/tests/e2e/test_metrics_exporter.py
@@ -9,6 +9,8 @@
import e2e.kubectl as kubectl
import e2e.settings as settings
import e2e.util as util
+import e2e.clickhouse as clickhouse
+import e2e.steps as steps
@TestScenario
@@ -46,6 +48,7 @@ def check_monitoring_chi(operator_namespace, operator_pod, expect_result, max_re
with Then("Not ready. Wait for " + str(i * 5) + " seconds"):
time.sleep(i * 5)
assert out == expect_result, error()
+
def check_monitoring_metrics(operator_namespace, operator_pod, expect_result, max_retries=10):
with Then(f"metrics-exporter /metrics endpoint result should match with {expect_result}"):
found = 0
@@ -120,6 +123,11 @@ def check_monitoring_metrics(operator_namespace, operator_pod, expect_result, ma
]
}
]
+
+ with Then("Add system.custom_metrics"):
+ clickhouse.query("test-017-multi-version", "CREATE VIEW system.custom_metrics as SELECT 'MyCustomMetric' as metric, 1 as value")
+
+
with Then("Check both pods are monitored"):
check_monitoring_chi(self.context.operator_namespace, operator_pod, expected_chi)
labels = ','.join([
@@ -127,17 +135,25 @@ def check_monitoring_metrics(operator_namespace, operator_pod, expect_result, ma
'clickhouse_altinity_com_chi="test-017-multi-version"',
'clickhouse_altinity_com_email="myname@mydomain.com, yourname@yourdoman.com"'
])
+
with Then("Check not empty /metrics"):
- check_monitoring_metrics(
- self.context.operator_namespace,
- operator_pod,
- expect_result={
- "# HELP chi_clickhouse_metric_VersionInteger": True,
- "# TYPE chi_clickhouse_metric_VersionInteger gauge": True,
- "chi_clickhouse_metric_VersionInteger{" + labels +",hostname=\"chi-test-017-multi-version-default-0-0": True,
- "chi_clickhouse_metric_VersionInteger{" + labels +",hostname=\"chi-test-017-multi-version-default-1-0": True,
- },
- )
+ check_monitoring_metrics(
+ self.context.operator_namespace,
+ operator_pod,
+ expect_result={
+ "# HELP chi_clickhouse_metric_VersionInteger": True,
+ "# TYPE chi_clickhouse_metric_VersionInteger gauge": True,
+ "chi_clickhouse_metric_VersionInteger{" + labels +",hostname=\"chi-test-017-multi-version-default-0-0": True,
+ "chi_clickhouse_metric_VersionInteger{" + labels +",hostname=\"chi-test-017-multi-version-default-1-0": True,
+ },
+ )
+
+ with Then("Check that custom_metrics is properly monitored"):
+ steps.check_metrics_monitoring(
+ operator_namespace = self.context.operator_namespace,
+ operator_pod = operator_pod,
+ expect_pattern="^chi_clickhouse_metric_MyCustomMetric{(.*?)} 1$"
+ )
with When("reboot metrics exporter"):
kubectl.launch(f"exec -n {self.context.operator_namespace} {operator_pod} -c metrics-exporter -- bash -c 'kill 1'")
diff --git a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py
index 1306bb144..7ed21610f 100644
--- a/tests/e2e/test_operator.py
+++ b/tests/e2e/test_operator.py
@@ -971,21 +971,31 @@ def test_010011_3(self):
user5_password_env = ""
sasl_username_env = ""
sasl_password_env = ""
+ custom0_env = ""
+ custom1_env = ""
for e in envs:
if "valueFrom" in e:
+ print(e["name"])
if e["valueFrom"]["secretKeyRef"]["key"] == "KAFKA_SASL_USERNAME":
sasl_username_env = e["name"]
if e["valueFrom"]["secretKeyRef"]["key"] == "KAFKA_SASL_PASSWORD":
sasl_password_env = e["name"]
if e["valueFrom"]["secretKeyRef"]["key"] == "pwduser5":
user5_password_env = e["name"]
+ if e["valueFrom"]["secretKeyRef"]["key"] == "custom0":
+ custom0_env = e["name"]
+ if e["valueFrom"]["secretKeyRef"]["key"] == "custom1":
+ custom1_env = e["name"]
with By("Secrets are properly propagated to env variables"):
- print(f"Found env variables: {sasl_username_env} {sasl_password_env} {user5_password_env}")
assert sasl_username_env != ""
assert sasl_password_env != ""
assert user5_password_env != ""
+ with By("Secrets are properly propagated to env variables for long settings names", flags=XFAIL):
+ assert custom0_env != ""
+ assert custom1_env != ""
+
with By("Secrets are properly referenced from settings.xml"):
cfm = kubectl.get("configmap", f"chi-{chi}-common-configd")
settings_xml = cfm["data"]["chop-generated-settings.xml"]
@@ -997,7 +1007,6 @@ def test_010011_3(self):
users_xml = cfm["data"]["chop-generated-users.xml"]
env_matches = [from_env.strip() for from_env in users_xml.splitlines() if "from_env" in from_env]
print(f"Found env substitutions: {env_matches}")
- time.sleep(5)
assert f"password from_env=\"{user5_password_env}\"" in users_xml
kubectl.delete_chi(chi)
@@ -2661,6 +2670,7 @@ def test_010023(self):
kubectl.apply(util.get_full_path("manifests/chit/test-023-auto-templates-1.yaml"))
kubectl.apply(util.get_full_path("manifests/chit/test-023-auto-templates-2.yaml"))
kubectl.apply(util.get_full_path("manifests/chit/test-023-auto-templates-3.yaml"))
+ kubectl.apply(util.get_full_path("manifests/chit/test-023-auto-templates-4.yaml"))
kubectl.apply(util.get_full_path("manifests/secret/test-023-secret.yaml"))
with Given("Give templates some time to be applied"):
time.sleep(15)
@@ -2680,6 +2690,7 @@ def test_010023(self):
assert kubectl.get_field("chi", chi, ".status.usedTemplates[0].name") == "clickhouse-stable"
assert kubectl.get_field("chi", chi, ".status.usedTemplates[1].name") == "extension-annotations"
assert kubectl.get_field("chi", chi, ".status.usedTemplates[2].name") == "grafana-dashboard-user"
+ assert kubectl.get_field("chi", chi, ".status.usedTemplates[3].name") == "set-labels"
# assert kubectl.get_field("chi", chi, ".status.usedTemplates[2].name") == ""
chi_spec = kubectl.get("chi", chi)
@@ -2721,6 +2732,12 @@ def checkEnv(pos, env_name, env_value):
out = clickhouse.query_with_error(chi, "select 1", user = "grafana_dashboard_user", pwd = "grafana_dashboard_user_password")
assert out == "1"
+ with Then("Label from a template should be populated"):
+ normalizedCompleted = kubectl.get_chi_normalizedCompleted(chi)
+ assert normalizedCompleted["metadata"]["labels"]["my-label"] == "test"
+ with Then("Pod label should be populated from template"):
+ assert kubectl.get_field("pod", f"chi-{chi}-single-0-0-0", ".metadata.labels.my-label") == "test"
+
with Given("Two selector templates are deployed"):
kubectl.apply(util.get_full_path("manifests/chit/tpl-clickhouse-selector-1.yaml"))
kubectl.apply(util.get_full_path("manifests/chit/tpl-clickhouse-selector-2.yaml"))
@@ -4179,7 +4196,7 @@ def test_010040(self):
kubectl.apply(util.get_full_path("manifests/chit/tpl-startup-probe.yaml"))
kubectl.create_and_check(
- manifest="manifests/chi/test-005-acm.yaml",
+ manifest = manifest,
check={
"pod_count": 1,
"pod_volumes": {
@@ -4204,6 +4221,43 @@ def test_010040(self):
delete_test_namespace()
+@TestScenario
+@Name("test_010040_1. Inject a startup probe using a reconcile setting")
+def test_010040_1(self):
+
+ create_shell_namespace_clickhouse_template()
+
+ manifest = "manifests/chi/test-040-startup-probe.yaml"
+ chi = yaml_manifest.get_name(util.get_full_path(manifest))
+
+ kubectl.create_and_check(
+ manifest=manifest,
+ check={
+ "pod_count": 1,
+ "do_not_delete": 1,
+ },
+ )
+
+ with Then("Startup probe should be defined"):
+ assert "startupProbe" in kubectl.get_pod_spec(chi)["containers"][0]
+
+ with Then("Readiness probe should be defined"):
+ assert "readinessProbe" in kubectl.get_pod_spec(chi)["containers"][0]
+
+ with Then("uptime() should be less than 120 seconds as defined by a readiness probe"):
+ out = clickhouse.query(chi, "select uptime()")
+ print(f"clickhouse uptime: {out}")
+ assert int(out) < 120
+
+ with Then("Pod should be not ready"):
+ ready = kubectl.get_pod_status_full(chi)["containerStatuses"][0]["ready"]
+ print(f"ready: {ready}")
+ assert ready is not True
+
+ with Finally("I clean up"):
+ delete_test_namespace()
+
+
@TestScenario
@Name("test_010041. Secure zookeeper")
def test_010041(self):
@@ -5114,7 +5168,7 @@ def test_010057(self):
},
)
- with When("Last first shard is Running"):
+ with When("First shard is Running"):
kubectl.wait_pod_status(f"chi-{chi}-{cluster}-0-0-0", "Running")
with Then("Other shards are running or being created"):
for shard in [1,2,3]:
@@ -5422,7 +5476,7 @@ def test_020003(self):
chi = yaml_manifest.get_name(util.get_full_path(manifest))
cluster = "default"
keeper_version_from = "24.8"
- keeper_version_to = "24.9"
+ keeper_version_to = "25.3"
with Given("CHI with 2 replicas"):
kubectl.create_and_check(
manifest=manifest,
@@ -5432,6 +5486,9 @@ def test_020003(self):
},
)
+ with And("Make sure Keeper is ready"):
+ kubectl.wait_chk_status('clickhouse-keeper', 'Completed')
+
check_replication(chi, {0, 1}, 1)
with When(f"I check clickhouse-keeper version is {keeper_version_from}"):
diff --git a/tests/requirements/requirements.md b/tests/requirements/requirements.md
index 8c84ab103..8ea4c4a46 100644
--- a/tests/requirements/requirements.md
+++ b/tests/requirements/requirements.md
@@ -3910,7 +3910,7 @@ spec:
###### RQ.SRS-026.ClickHouseOperator.CustomResource.Spec.Templates.HostTemplates.Spec.Name
version: 1.0
-[ClickHouse Operator] SHALL support specifying custom host name for each `clickhuse-server`
+[ClickHouse Operator] SHALL support specifying custom host name for each `clickhouse-server`
using `.spec.templates.hostTemplates[].spec.name` [string] type property
with the minimum length of `1` and maximum length of `15` matching
`"^[a-zA-Z0-9-]{0,15}$"` pattern.
diff --git a/tests/requirements/requirements.py b/tests/requirements/requirements.py
index aedfff55c..fa955d5dc 100644
--- a/tests/requirements/requirements.py
+++ b/tests/requirements/requirements.py
@@ -4881,7 +4881,7 @@
type=None,
uid=None,
description=(
- '[ClickHouse Operator] SHALL support specifying custom host name for each `clickhuse-server`\n'
+ '[ClickHouse Operator] SHALL support specifying custom host name for each `clickhouse-server`\n'
'using `.spec.templates.hostTemplates[].spec.name` [string] type property\n'
'with the minimum length of `1` and maximum length of `15` matching\n'
'`"^[a-zA-Z0-9-]{0,15}$"` pattern.\n'
@@ -11437,7 +11437,7 @@
###### RQ.SRS-026.ClickHouseOperator.CustomResource.Spec.Templates.HostTemplates.Spec.Name
version: 1.0
-[ClickHouse Operator] SHALL support specifying custom host name for each `clickhuse-server`
+[ClickHouse Operator] SHALL support specifying custom host name for each `clickhouse-server`
using `.spec.templates.hostTemplates[].spec.name` [string] type property
with the minimum length of `1` and maximum length of `15` matching
`"^[a-zA-Z0-9-]{0,15}$"` pattern.